/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_vblank.h>

/**
 * amdgpu_display_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler, because the hotplug handler has to
 * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work.work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj);

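/* Fence callback: drop the fence reference and kick the flip work off. */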
static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

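/*
 * Consume *f: if the fence is not signaled yet, re-arm the flip work as a
 * fence callback and return true (the flip is deferred until the fence
 * signals); otherwise drop the reference and return false.
 */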
static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

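/*
 * Deferred page-flip handler: waits for every fence the flip depends on and
 * for the target vblank count, then programs the flip through the per-ASIC
 * page_flip hook while holding the event lock.
 */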
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
		    amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

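/*
 * Legacy (non-atomic) page-flip entry point: pins the new buffer, collects
 * the write fences it depends on, and queues amdgpu_display_flip_work_func
 * to program the flip once the target vblank count is reached.
 */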
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

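/*
 * Legacy modeset entry point: wraps drm_crtc_helper_set_config() with a
 * runtime PM reference that is held as long as any CRTC stays enabled.
 */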
int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop
	 * the power ref we got before
	 */
	if (!active && adev->have_disp_power_ref)
		adev->have_disp_power_ref = false;
out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

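/* Dump the connector, DDC, router and encoder setup to the kernel log. */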
void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

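/*
 * Probe for a sink by reading the first EDID bytes over DDC (or over the DP
 * AUX channel when use_aux is set). Returns true only if the transfer
 * succeeds and the data starts with a plausible EDID header.
 */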
bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for a valid EDID header.
	 * The EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid, as
	 * drm_edid_block_valid() can fix the last 2 bytes.
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector
		 */
		return false;
	}
	return true;
}

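/*
 * .dirty hook of the atomic framebuffer funcs: only kernel-internal clients
 * (file == NULL) may flush damage this way; userspace gets -ENOSYS.
 */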
static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
			  unsigned int flags, unsigned int color,
			  struct drm_clip_rect *clips, unsigned int num_clips)
{
	if (file)
		return -ENOSYS;

	return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
					 num_clips);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	.dirty = amdgpu_dirtyfb
};

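/* Return the memory domains (VRAM, and possibly GTT) a scanout BO can use. */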
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * If amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board.  But this mapping is required
	 * to avoid hangs caused by placement of a scanout BO in GTT on certain
	 * APUs.  So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    adev->dc_enabled &&
	    adev->mode_info.gpu_vm_support)
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

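/*
 * Format info overrides used with DCC modifiers: the additional planes hold
 * DCC metadata for the plane-0 base surface and therefore have zero cpp.
 */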
static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) < AMD_FMT_MOD_TILE_VER_GFX9 ||
	    AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12)
		return NULL;

	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used. */
	return NULL;
}

/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at least
	 * the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40 || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}

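/* Derive a GFX12 format modifier from the BO's hardware tiling flags. */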
static int convert_tiling_flags_to_modifier_gfx12(struct amdgpu_framebuffer *afb)
{
	u64 modifier = 0;
	int swizzle_mode = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE);

	if (!swizzle_mode) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int max_comp_block =
			AMDGPU_TILING_GET(afb->tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);

		modifier =
			AMD_FMT_MOD |
			AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
			AMD_FMT_MOD_SET(TILE, swizzle_mode) |
			AMD_FMT_MOD_SET(DCC, afb->gfx12_dcc) |
			AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block);
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

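/*
 * Derive a GFX9/GFX10/GFX11 format modifier from the BO's hardware tiling
 * flags, including the DCC layout and, when the userspace driver retiles,
 * the displayable DCC plane recovered from the opaque BO metadata.
 */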
static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;
	int num_pipes = 0;
	int num_pkrs = 0;

	num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
	num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64KiB _T */
		case 6: /* 64KiB _X */
			block_size_bits = 16;
			break;
		case 7: /* 256KiB */
			block_size_bits = 18;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX11;
		else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			 IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			 IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0)) {
				if (!has_xor)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 2:
			if (amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0)) {
				if (!has_xor && afb->base.format->cpp[0] != 4)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 3:
			break;
		}

		if (has_xor) {
			if (num_pipes == num_pkrs && num_pkrs == 0) {
				DRM_ERROR("invalid number of pipes and packers\n");
				return -EINVAL;
			}

			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX11:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode =
				(adev->asic_type > CHIP_RAVEN ||
				 (adev->asic_type == CHIP_RAVEN &&
				  adev->external_rev_id >= 0x81)) &&
				amdgpu_ip_version(adev, GC_HWIP, 0) <
					IP_VERSION(11, 0, 0);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't have to juggle around another DCC
			 * plane internally.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits;  /* of base surface data */

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((amdgpu_ip_version(adev, GC_HWIP,
							       0) >=
					     IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}
			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 micro_tile_mode;

	if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
		return 0;

	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
	switch (micro_tile_mode) {
	case 0: /* DISPLAY */
	case 3: /* RENDER */
		return 0;
	default:
		drm_dbg_kms(afb->base.dev,
			    "Micro tile mode %llu not supported for scanout\n",
			    micro_tile_mode);
		return -EINVAL;
	}
}

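/* Convert a block size in log2 bytes to its width/height in pixels. */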
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

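/*
 * Return the log2 byte size of a DCC metadata block for the given modifier,
 * honoring the RB/pipe alignment constraints of the tile version.
 */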
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
		/*
		 * TODO: for pipe aligned we may need to check the alignment of the
		 * total size of the surface, which may need to be bigger than the
		 * natural alignment due to some HW workarounds
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
	case AMD_FMT_MOD_TILE_VER_GFX11: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		if (ver >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}

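/*
 * Check one FB plane against the block geometry of its tiling: the pitch
 * must be a block multiple and at least the minimum, the offset must be
 * block-aligned, and the BO must be large enough to hold the plane.
 */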
static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* Force at least natural alignment. */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block size 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	size = rfb->base.offsets[plane] +
		(uint64_t)rfb->base.pitches[plane] / block_pitch *
		block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}

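/*
 * Validate pitches, offsets and the BO size for every plane implied by the
 * framebuffer's format modifier, including DCC metadata planes.
 */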
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch (swizzle) {
			case AMD_FMT_MOD_TILE_GFX12_256B_2D:
				block_size_log2 = 8;
				break;
			case AMD_FMT_MOD_TILE_GFX12_4K_2D:
				block_size_log2 = 12;
				break;
			case AMD_FMT_MOD_TILE_GFX12_64K_2D:
				block_size_log2 = 16;
				break;
			case AMD_FMT_MOD_TILE_GFX12_256K_2D:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			case DC_SW_VAR_S_X:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
	    AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}

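/* Read tiling flags, TMZ state and GFX12 DCC state from the FB's BO. */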
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface,
				      bool *gfx12_dcc)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		*gfx12_dcc = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
	*tmz_surface = amdgpu_bo_encrypted(rbo);
	*gfx12_dcc = rbo->flags & AMDGPU_GEM_CREATE_GFX12_DCC;

	amdgpu_bo_unreserve(rbo);

	return r;
}

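/*
 * Fill in and register a framebuffer for a GEM object, after checking that
 * at least one plane supports the requested format/modifier combination.
 */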
static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
						 struct amdgpu_framebuffer *rfb,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd,
						 struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	/* Verify that the modifier is supported. */
	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	if (drm_drv_uses_atomic_modeset(dev))
		ret = drm_framebuffer_init(dev, &rfb->base,
					   &amdgpu_fb_funcs_atomic);
	else
		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);

	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

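/*
 * Core framebuffer setup: all planes must share a single BO; tiling flags
 * are converted to a format modifier where needed and all sizes verified.
 */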
static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int ret, i;

	/*
	 * This needs to happen before modifier conversion as that might change
	 * the number of planes.
	 */
	for (i = 1; i < rfb->base.format->num_planes; ++i) {
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
			ret = -EINVAL;
			return ret;
		}
	}

	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface,
					 &rfb->gfx12_dcc);
	if (ret)
		return ret;

	if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);
		if (ret)
			return ret;
	}

	if (!dev->mode_config.fb_modifiers_not_supported &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0))
			ret = convert_tiling_flags_to_modifier_gfx12(rfb);
		else
			ret = convert_tiling_flags_to_modifier(rfb);

		if (ret) {
			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
				    rfb->tiling_flags);
			return ret;
		}
	}

	ret = amdgpu_display_verify_sizes(rfb);
	if (ret)
		return ret;

	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

	return 0;
}

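/*
 * .fb_create implementation: looks up the GEM object behind the handle,
 * rejects imported dma-bufs that cannot be scanned out from GTT, and wraps
 * the object in an amdgpu_framebuffer.
 */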
struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint32_t domains;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		drm_dbg_kms(dev,
			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
			    mode_cmd->handles[0]);

		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	bo = gem_to_amdgpu_bo(obj);
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
						    mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}

	drm_gem_object_put(obj);
	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

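/* Create the driver-private KMS properties (underscan, audio, dither, ...). */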
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev_to_drm(adev));

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	return 0;
}

void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

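/*
 * Compute the per-CRTC scaler state: pick the RMX mode from the attached
 * encoder, apply HDMI underscan borders, and derive the resulting
 * horizontal and vertical scaling factors.
 */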
bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector && connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

1511 /*
1512  * Retrieve current video scanout position of crtc on a given gpu, and
1513  * an optional accurate timestamp of when query happened.
1514  *
1515  * \param dev Device to query.
1516  * \param pipe Crtc to query.
1517  * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
1518  *              For driver internal use only also supports these flags:
1519  *
1520  *              USE_REAL_VBLANKSTART to use the real start of vblank instead
1521  *              of a fudged earlier start of vblank.
1522  *
1523  *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
1524  *              fudged earlier start of vblank in *vpos and the distance
1525  *              to true start of vblank in *hpos.
1526  *
1527  * \param *vpos Location where vertical scanout position should be stored.
1528  * \param *hpos Location where horizontal scanout position should go.
1529  * \param *stime Target location for timestamp taken immediately before
1530  *               scanout position query. Can be NULL to skip timestamp.
1531  * \param *etime Target location for timestamp taken immediately after
1532  *               scanout position query. Can be NULL to skip timestamp.
1533  *
1534  * Returns vpos as a positive number while in active scanout area.
1535  * Returns vpos as a negative number inside vblank, counting the number
1536  * of scanlines to go until end of vblank, e.g., -1 means "one scanline
1537  * until start of active scanout / end of vblank."
1538  *
1539  * \return Flags, or'ed together as follows:
1540  *
1541  * DRM_SCANOUTPOS_VALID = Query successful.
1542  * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
1543  * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
1544  * this flag means that the returned position may be offset by a constant
1545  * but unknown small number of scanlines w.r.t. the real scanout position.
1546  *
1547  */
1548 int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
1549 			unsigned int pipe, unsigned int flags, int *vpos,
1550 			int *hpos, ktime_t *stime, ktime_t *etime,
1551 			const struct drm_display_mode *mode)
1552 {
1553 	u32 vbl = 0, position = 0;
1554 	int vbl_start, vbl_end, vtotal, ret = 0;
1555 	bool in_vbl = true;
1556 
1557 	struct amdgpu_device *adev = drm_to_adev(dev);
1558 
1559 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
1560 
1561 	/* Get optional system timestamp before query. */
1562 	if (stime)
1563 		*stime = ktime_get();
1564 
1565 	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
1566 		ret |= DRM_SCANOUTPOS_VALID;
1567 
1568 	/* Get optional system timestamp after query. */
1569 	if (etime)
1570 		*etime = ktime_get();
1571 
1572 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
1573 
1574 	/* Decode into vertical and horizontal scanout position. */
1575 	*vpos = position & 0x1fff;
1576 	*hpos = (position >> 16) & 0x1fff;
1577 
1578 	/* Valid vblank area boundaries from gpu retrieved? */
1579 	if (vbl > 0) {
1580 		/* Yes: Decode. */
1581 		ret |= DRM_SCANOUTPOS_ACCURATE;
1582 		vbl_start = vbl & 0x1fff;
1583 		vbl_end = (vbl >> 16) & 0x1fff;
1584 	} else {
1585 		/* No: Fake something reasonable that gives at least OK results. */
1586 		vbl_start = mode->crtc_vdisplay;
1587 		vbl_end = 0;
1588 	}
1589 
1590 	/* Called from driver internal vblank counter query code? */
1591 	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
1592 		/* Caller wants distance from real vbl_start in *hpos */
1593 		*hpos = *vpos - vbl_start;
1594 	}
1595 
1596 	/* Fudge vblank to start a few scanlines earlier to handle the
1597 	 * problem that vblank irqs fire a few scanlines before the start
1598 	 * of vblank. Some driver-internal callers need the true vblank
1599 	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
1600 	 *
1601 	 * The cause of the "early" vblank irq is that the irq is triggered
1602 	 * by the line buffer logic when the line buffer read position enters
1603 	 * the vblank, whereas our crtc scanout position naturally lags the
1604 	 * line buffer read position.
1605 	 */
1606 	if (!(flags & USE_REAL_VBLANKSTART))
1607 		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
1608 
1609 	/* Test scanout position against vblank region. */
1610 	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
1611 		in_vbl = false;
1612 
1613 	/* In vblank? */
1614 	if (in_vbl)
1615 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
1616 
1617 	/* Called from driver internal vblank counter query code? */
1618 	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
1619 		/* Caller wants distance from fudged earlier vbl_start */
1620 		*vpos -= vbl_start;
1621 		return ret;
1622 	}
1623 
1624 	/* Check if inside vblank area and apply corrective offsets:
1625 	 * vpos will then be >=0 in video scanout area, but negative
1626 	 * within vblank area, counting down the number of lines until
1627 	 * start of scanout.
1628 	 */
1629 
1630 	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
1631 	if (in_vbl && (*vpos >= vbl_start)) {
1632 		vtotal = mode->crtc_vtotal;
1633 
1634 		/* With variable refresh rate displays the vpos can exceed
1635 		 * the vtotal value. Clamp to 0 to return -vbl_end instead
1636 		 * of guessing the remaining number of lines until scanout.
1637 		 */
1638 		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
1639 	}
1640 
1641 	/* Correct for shifted end of vbl at vbl_end. */
1642 	*vpos = *vpos - vbl_end;
1643 
1644 	return ret;
1645 }
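
/*
 * Usage sketch (hypothetical caller, illustrative only): interpreting
 * the flags and positions documented above.
 *
 *	int vpos, hpos, flags;
 *	ktime_t stime, etime;
 *
 *	flags = amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, &vpos,
 *						   &hpos, &stime, &etime,
 *						   &crtc->hwmode);
 *	if ((flags & DRM_SCANOUTPOS_VALID) && vpos < 0) {
 *		// in vblank: -vpos scanlines until active scanout resumes
 *	}
 */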
1646 
1647 int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
1648 {
1649 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
1650 		return AMDGPU_CRTC_IRQ_NONE;
1651 
1652 	switch (crtc) {
1653 	case 0:
1654 		return AMDGPU_CRTC_IRQ_VBLANK1;
1655 	case 1:
1656 		return AMDGPU_CRTC_IRQ_VBLANK2;
1657 	case 2:
1658 		return AMDGPU_CRTC_IRQ_VBLANK3;
1659 	case 3:
1660 		return AMDGPU_CRTC_IRQ_VBLANK4;
1661 	case 4:
1662 		return AMDGPU_CRTC_IRQ_VBLANK5;
1663 	case 5:
1664 		return AMDGPU_CRTC_IRQ_VBLANK6;
1665 	default:
1666 		return AMDGPU_CRTC_IRQ_NONE;
1667 	}
1668 }
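
/*
 * Note: if the AMDGPU_CRTC_IRQ_VBLANKn enumerators are consecutive, the
 * switch above collapses to the sketch below; the explicit switch avoids
 * relying on that enum layout.
 *
 *	if (crtc >= 0 && crtc < adev->mode_info.num_crtc)
 *		return AMDGPU_CRTC_IRQ_VBLANK1 + crtc;
 *	return AMDGPU_CRTC_IRQ_NONE;
 */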
1669 
1670 bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
1671 			bool in_vblank_irq, int *vpos,
1672 			int *hpos, ktime_t *stime, ktime_t *etime,
1673 			const struct drm_display_mode *mode)
1674 {
1675 	struct drm_device *dev = crtc->dev;
1676 	unsigned int pipe = crtc->index;
1677 
1678 	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
1679 						  stime, etime, mode);
1680 }
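
/*
 * This wrapper matches the drm_crtc_helper_funcs.get_scanout_position
 * prototype, so a display backend can plug it in directly; a minimal
 * hook-up sketch (struct name illustrative):
 *
 *	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
 *		.get_scanout_position = amdgpu_crtc_get_scanout_position,
 *		// ...other callbacks...
 *	};
 */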
1681 
1682 static bool
1683 amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
1684 {
1685 	struct drm_device *dev = adev_to_drm(adev);
1686 	struct drm_fb_helper *fb_helper = dev->fb_helper;
1687 
1688 	if (!fb_helper || !fb_helper->buffer)
1689 		return false;
1690 
1691 	if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
1692 		return false;
1693 
1694 	return true;
1695 }
1696 
1697 int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
1698 {
1699 	struct drm_device *dev = adev_to_drm(adev);
1700 	struct drm_crtc *crtc;
1701 	struct drm_connector *connector;
1702 	struct drm_connector_list_iter iter;
1703 	int r;
1704 
1705 	drm_kms_helper_poll_disable(dev);
1706 
1707 	/* turn off display hw */
1708 	drm_modeset_lock_all(dev);
1709 	drm_connector_list_iter_begin(dev, &iter);
1710 	drm_for_each_connector_iter(connector, &iter)
1711 		drm_helper_connector_dpms(connector,
1712 					  DRM_MODE_DPMS_OFF);
1713 	drm_connector_list_iter_end(&iter);
1714 	drm_modeset_unlock_all(dev);
1715 	/* unpin the front buffers and cursors */
1716 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1717 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1718 		struct drm_framebuffer *fb = crtc->primary->fb;
1719 		struct amdgpu_bo *robj;
1720 
1721 		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
1722 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1723 
1724 			r = amdgpu_bo_reserve(aobj, true);
1725 			if (r == 0) {
1726 				amdgpu_bo_unpin(aobj);
1727 				amdgpu_bo_unreserve(aobj);
1728 			}
1729 		}
1730 
1731 		if (!fb || !fb->obj[0])
1732 			continue;
1733 
1734 		robj = gem_to_amdgpu_bo(fb->obj[0]);
1735 		if (!amdgpu_display_robj_is_fb(adev, robj)) {
1736 			r = amdgpu_bo_reserve(robj, true);
1737 			if (r == 0) {
1738 				amdgpu_bo_unpin(robj);
1739 				amdgpu_bo_unreserve(robj);
1740 			}
1741 		}
1742 	}
1743 	return 0;
1744 }
1745 
1746 int amdgpu_display_resume_helper(struct amdgpu_device *adev)
1747 {
1748 	struct drm_device *dev = adev_to_drm(adev);
1749 	struct drm_connector *connector;
1750 	struct drm_connector_list_iter iter;
1751 	struct drm_crtc *crtc;
1752 	int r;
1753 
1754 	/* pin cursors */
1755 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1756 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1757 
1758 		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
1759 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
1760 
1761 			r = amdgpu_bo_reserve(aobj, true);
1762 			if (r == 0) {
1763 				aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
1764 				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
1765 				if (r != 0)
1766 					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
1767 				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
1768 				amdgpu_bo_unreserve(aobj);
1769 			}
1770 		}
1771 	}
1772 
1773 	drm_helper_resume_force_mode(dev);
1774 
1775 	/* turn on display hw */
1776 	drm_modeset_lock_all(dev);
1777 
1778 	drm_connector_list_iter_begin(dev, &iter);
1779 	drm_for_each_connector_iter(connector, &iter)
1780 		drm_helper_connector_dpms(connector,
1781 					  DRM_MODE_DPMS_ON);
1782 	drm_connector_list_iter_end(&iter);
1783 
1784 	drm_modeset_unlock_all(dev);
1785 
1786 	drm_kms_helper_poll_enable(dev);
1787 
1788 	return 0;
1789 }
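
/*
 * Pairing sketch (hypothetical glue, names illustrative): the two helpers
 * above are meant to bracket an ASIC's own display suspend/resume work,
 * e.g.:
 *
 *	static int foo_display_suspend(struct amdgpu_device *adev)
 *	{
 *		int r = amdgpu_display_suspend_helper(adev);
 *
 *		if (r)
 *			return r;
 *		// ...save hw state, disable display interrupts...
 *		return 0;
 *	}
 *
 * with the matching resume path restoring hw state before calling
 * amdgpu_display_resume_helper(adev).
 */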
1790 
1791