/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

/* Converts a DRM S31.32 value to the HW S0.9 format. */
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}
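
/*
 * Worked examples for the conversion above (values derived from the
 * code, for illustration):
 *
 *    0.5 in S31.32 sign-magnitude (0x0000000080000000) has no integer
 *        bits set, so r = (in >> 23) & GENMASK(8, 0) = 0x100, i.e. 0.5
 *        in S0.9 (256/512).
 *   -0.5 (0x8000000080000000) additionally sets the sign bit, giving
 *        r = BIT(9) | 0x100 = 0x300.
 *    2.0 (0x0000000200000000) has integer bits set and saturates to
 *        GENMASK(8, 0) = 0x1ff, the largest S0.9 magnitude.
 */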

static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
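
/*
 * Note on the unconditional SCALER_OLEDOFFS write above: ctm_state->fifo
 * holds the 1-based FIFO number picked in vc4_ctm_atomic_check()
 * (crtc->channel + 1), and 0 disables the CTM. Writing it back on every
 * commit therefore both enables the matrix for the chosen CRTC and
 * turns it off again once userland clears the property.
 */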

static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_helper_wait_for_fences(dev, state, false);

	drm_atomic_helper_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* Make sure that drm_atomic_helper_wait_for_vblanks()
	 * actually waits for vblank. If we're doing a full atomic
	 * modeset (as opposed to a vc4_update_plane() short circuit),
	 * then we need to wait for scanout to be done with our
	 * display lists before we free them and potentially reallocate
	 * and overwrite the dlist memory with a new modeset.
	 */
	state->legacy_cursor_update = false;

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_vblanks(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_put(state);

	up(&vc4->async_modeset);
}

static void commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	vc4_atomic_complete_commit(state);
}

/**
 * vc4_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a state object that has been pre-validated with
 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 * reservation fails. For now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int vc4_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	if (state->async_update) {
		ret = down_interruptible(&vc4->async_modeset);
		if (ret)
			return ret;

		ret = drm_atomic_helper_prepare_planes(dev, state);
		if (ret) {
			up(&vc4->async_modeset);
			return ret;
		}

		drm_atomic_helper_async_commit(dev, state);

		drm_atomic_helper_cleanup_planes(dev, state);

		up(&vc4->async_modeset);

		return 0;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, commit_work);

	ret = down_interruptible(&vc4->async_modeset);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		up(&vc4->async_modeset);
		return ret;
	}

	if (!nonblock) {
		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
		if (ret) {
			drm_atomic_helper_cleanup_planes(dev, state);
			up(&vc4->async_modeset);
			return ret;
		}
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one condition: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	drm_atomic_state_get(state);
	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		vc4_atomic_complete_commit(state);

	return 0;
}
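
/*
 * Note: vc4->async_modeset is initialized as a binary semaphore in
 * vc4_kms_load(). It is taken before a commit is prepared and released
 * at the end of vc4_atomic_complete_commit() (possibly from the
 * nonblocking worker), so at most one commit is in flight against the
 * hardware at any time.
 */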

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put_unlocked(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i, j;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			/* fifo is 1-based since 0 disables CTM. */
			int fifo = to_vc4_crtc(crtc)->channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 *
			 * Use a separate index here so we don't clobber the
			 * outer for_each_oldnew_crtc_in_state() iterator.
			 */
			ctm = new_crtc_state->ctm->data;
			for (j = 0; j < ARRAY_SIZE(ctm->matrix); j++) {
				u64 val = ctm->matrix[j];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}
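
/*
 * Worked example of the magnitude check above (values derived from the
 * S31.32 sign-magnitude encoding, for illustration): a coefficient of
 * exactly 1.0 (0x0000000100000000) is accepted, since val == BIT_ULL(32)
 * does not exceed the limit, while 1.0 + 2^-32 (0x0000000100000001) is
 * rejected with -EINVAL. A negative coefficient like -1.0
 * (0x8000000100000000) passes too, because the sign bit is masked off
 * before the comparison.
 */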

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	return drm_atomic_helper_check(dev, state);
}

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = vc4_atomic_check,
	.atomic_commit = vc4_atomic_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state;
	int ret;

	sema_init(&vc4->async_modeset, 1);

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.allow_fb_modifiers = true;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;
	drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_connector)
		drm_fb_cma_fbdev_init(dev, 32, 0);

	drm_kms_helper_poll_init(dev);

	return 0;
}