// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */
#include <linux/component.h>
#include <linux/interrupt.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/drm_probe_helper.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"

DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);

static int komeda_gem_cma_dumb_create(struct drm_file *file,
				      struct drm_device *dev,
				      struct drm_mode_create_dumb *args)
{
	struct komeda_dev *mdev = dev->dev_private;
	u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = ALIGN(pitch, mdev->chip.bus_width);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}

static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct komeda_dev *mdev = drm->dev_private;
	struct komeda_kms_dev *kms = to_kdev(drm);
	struct komeda_events evts;
	irqreturn_t status;
	u32 i;

	/* Call into the CHIP to recognize events */
	memset(&evts, 0, sizeof(evts));
	status = mdev->funcs->irq_handler(mdev, &evts);

	/* Notify the crtc to handle the events */
	for (i = 0; i < kms->n_crtcs; i++)
		komeda_crtc_handle_event(&kms->crtcs[i], &evts);

	return status;
}

static struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
			   DRIVER_PRIME | DRIVER_HAVE_IRQ,
	.lastclose = drm_fb_helper_lastclose,
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = komeda_gem_cma_dumb_create,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.date = "20181101",
	.major = 0,
	.minor = 1,
};

static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};

static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
				       struct list_head *zorder_list)
{
	struct komeda_plane_state *new = to_kplane_st(plane_st);
	struct komeda_plane_state *node, *last;

	last = list_empty(zorder_list) ?
	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);

	/* The list is sorted by increasing zpos, so if the list is empty or
	 * the zpos of the new node is bigger than that of the last node in
	 * the list, there is no need to loop: just insert the new node at
	 * the tail of the list.
	 */
	if (!last || (new->base.zpos > last->base.zpos)) {
		list_add_tail(&new->zlist_node, zorder_list);
		return 0;
	}

	/* Build the list by increasing zpos */
	list_for_each_entry(node, zorder_list, zlist_node) {
		if (new->base.zpos < node->base.zpos) {
			list_add_tail(&new->zlist_node, &node->zlist_node);
			break;
		} else if (node->base.zpos == new->base.zpos) {
			struct drm_plane *a = node->base.plane;
			struct drm_plane *b = new->base.plane;

			/* Komeda doesn't support setting the same zpos for
			 * different planes.
			 */
			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured same zpos: %d.\n",
					 a->name, b->name, node->base.zpos);
			return -EINVAL;
		}
	}

	return 0;
}

static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_framebuffer *fb;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes to the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build a list sorted by increasing zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		fb = plane_st->fb;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split has been enabled, one plane will be handled
		 * by two separate komeda layers (left/right), which may need
		 * two zorders:
		 * - zorder:     for the left layer, handling the left part of
		 *               the display.
		 * - zorder + 1: reserved for the right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* calculate max slave zorder */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
			kcrtc_st->max_slave_zorder =
				max(plane_st->normalized_zpos,
				    kcrtc_st->max_slave_zorder);
	}

	crtc_st->zpos_changed = true;

	return 0;
}

static int komeda_kms_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_st, *new_crtc_st;
	int i, err;

	err = drm_atomic_helper_check_modeset(dev, state);
	if (err)
		return err;

	/* Komeda needs to re-calculate the resource assignment in every
	 * commit, so all affected planes (even unchanged ones) have to be
	 * added to the drm_atomic_state.
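	 *
	 * Pulling every plane of the CRTC into the state also means the
	 * plane atomic checks below run for unchanged planes as well, so
	 * their resource assignment is re-evaluated against the new state.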
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
		err = drm_atomic_add_affected_planes(state, crtc);
		if (err)
			return err;

		err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
		if (err)
			return err;
	}

	err = drm_atomic_helper_check_planes(dev, state);
	if (err)
		return err;

	return 0;
}

static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create = komeda_fb_create,
	.atomic_check = komeda_kms_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
					struct komeda_dev *mdev)
{
	struct drm_mode_config *config = &kms->base.mode_config;

	drm_mode_config_init(&kms->base);

	komeda_kms_setup_crtcs(kms, mdev);

	/* Get value from dev */
	config->min_width = 0;
	config->min_height = 0;
	config->max_width = 4096;
	config->max_height = 4096;
	config->allow_fb_modifiers = true;

	config->funcs = &komeda_mode_config_funcs;
	config->helper_private = &komeda_mode_config_helpers;
}

struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
	struct drm_device *drm;
	int err;

	if (!kms)
		return ERR_PTR(-ENOMEM);

	drm = &kms->base;
	err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
	if (err)
		goto free_kms;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto cleanup_mode_config;

	err = mdev->funcs->enable_irq(mdev);
	if (err)
		goto cleanup_mode_config;

	drm->irq_enabled = true;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto cleanup_mode_config;

	return kms;

cleanup_mode_config:
	drm_kms_helper_poll_fini(drm);
	drm->irq_enabled = false;
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
free_kms:
	kfree(kms);
	return ERR_PTR(err);
}

void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm->irq_enabled = false;
	mdev->funcs->disable_irq(mdev);
	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	component_unbind_all(mdev->dev, drm);
	komeda_kms_cleanup_private_objs(kms);
	drm_mode_config_cleanup(drm);
	drm->dev_private = NULL;
	drm_dev_put(drm);
}