// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"
#include "xen_drm_front_kms.h"

/*
 * Timeout in ms to wait for frame done event from the backend:
 * must be a bit more than IO time-out
 */
#define FRAME_DONE_TO_MS	(XEN_DRM_FRONT_WAIT_BACK_MS + 100)

static struct xen_drm_front_drm_pipeline *
to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
{
	return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
}

static void fb_destroy(struct drm_framebuffer *fb)
{
	struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
	int idx;

	if (drm_dev_enter(fb->dev, &idx)) {
		xen_drm_front_fb_detach(drm_info->front_info,
					xen_drm_front_fb_to_cookie(fb));
		drm_dev_exit(idx);
	}
	drm_gem_fb_destroy(fb);
}

static const struct drm_framebuffer_funcs fb_funcs = {
	.destroy = fb_destroy,
};

static struct drm_framebuffer *
fb_create(struct drm_device *dev, struct drm_file *filp,
	  const struct drm_format_info *info,
	  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem_obj;
	int ret;

	fb = drm_gem_fb_create_with_funcs(dev, filp, info, mode_cmd, &fb_funcs);
	if (IS_ERR(fb))
		return fb;

	gem_obj = fb->obj[0];

	ret = xen_drm_front_fb_attach(drm_info->front_info,
				      xen_drm_front_dbuf_to_cookie(gem_obj),
				      xen_drm_front_fb_to_cookie(fb),
				      fb->width, fb->height,
				      fb->format->format);
	if (ret < 0) {
		DRM_ERROR("Backend failed to attach FB %p: %d\n", fb, ret);
		goto fail;
	}

	return fb;

fail:
	drm_gem_fb_destroy(fb);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_crtc *crtc = &pipeline->pipe.crtc;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (pipeline->pending_event)
		drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
	pipeline->pending_event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void display_enable(struct drm_simple_display_pipe *pipe,
			   struct drm_crtc_state *crtc_state,
			   struct drm_plane_state *plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret, idx;

	if (!drm_dev_enter(pipe->crtc.dev, &idx))
		return;

	ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
				     fb->width, fb->height,
				     fb->format->cpp[0] * 8,
				     xen_drm_front_fb_to_cookie(fb));

	if (ret) {
DRM_ERROR("Failed to enable display: %d\n", ret); 128 pipeline->conn_connected = false; 129 } 130 131 drm_dev_exit(idx); 132 } 133 134 static void display_disable(struct drm_simple_display_pipe *pipe) 135 { 136 struct xen_drm_front_drm_pipeline *pipeline = 137 to_xen_drm_pipeline(pipe); 138 int ret = 0, idx; 139 140 if (drm_dev_enter(pipe->crtc.dev, &idx)) { 141 ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0, 142 xen_drm_front_fb_to_cookie(NULL)); 143 drm_dev_exit(idx); 144 } 145 if (ret) 146 DRM_ERROR("Failed to disable display: %d\n", ret); 147 148 /* Make sure we can restart with enabled connector next time */ 149 pipeline->conn_connected = true; 150 151 /* release stalled event if any */ 152 send_pending_event(pipeline); 153 } 154 155 void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline, 156 u64 fb_cookie) 157 { 158 /* 159 * This runs in interrupt context, e.g. under 160 * drm_info->front_info->io_lock, so we cannot call _sync version 161 * to cancel the work 162 */ 163 cancel_delayed_work(&pipeline->pflip_to_worker); 164 165 send_pending_event(pipeline); 166 } 167 168 static void pflip_to_worker(struct work_struct *work) 169 { 170 struct delayed_work *delayed_work = to_delayed_work(work); 171 struct xen_drm_front_drm_pipeline *pipeline = 172 container_of(delayed_work, 173 struct xen_drm_front_drm_pipeline, 174 pflip_to_worker); 175 176 DRM_ERROR("Frame done timed-out, releasing"); 177 send_pending_event(pipeline); 178 } 179 180 static bool display_send_page_flip(struct drm_simple_display_pipe *pipe, 181 struct drm_plane_state *old_plane_state) 182 { 183 struct drm_plane_state *plane_state = 184 drm_atomic_get_new_plane_state(old_plane_state->state, 185 &pipe->plane); 186 187 /* 188 * If old_plane_state->fb is NULL and plane_state->fb is not, 189 * then this is an atomic commit which will enable display. 190 * If old_plane_state->fb is not NULL and plane_state->fb is, 191 * then this is an atomic commit which will disable display. 192 * Ignore these and do not send page flip as this framebuffer will be 193 * sent to the backend as a part of display_set_config call. 194 */ 195 if (old_plane_state->fb && plane_state->fb) { 196 struct xen_drm_front_drm_pipeline *pipeline = 197 to_xen_drm_pipeline(pipe); 198 struct xen_drm_front_drm_info *drm_info = pipeline->drm_info; 199 int ret; 200 201 schedule_delayed_work(&pipeline->pflip_to_worker, 202 msecs_to_jiffies(FRAME_DONE_TO_MS)); 203 204 ret = xen_drm_front_page_flip(drm_info->front_info, 205 pipeline->index, 206 xen_drm_front_fb_to_cookie(plane_state->fb)); 207 if (ret) { 208 DRM_ERROR("Failed to send page flip request to backend: %d\n", ret); 209 210 pipeline->conn_connected = false; 211 /* 212 * Report the flip not handled, so pending event is 213 * sent, unblocking user-space. 214 */ 215 return false; 216 } 217 /* 218 * Signal that page flip was handled, pending event will be sent 219 * on frame done event from the backend. 220 */ 221 return true; 222 } 223 224 return false; 225 } 226 227 static int display_check(struct drm_simple_display_pipe *pipe, 228 struct drm_plane_state *plane_state, 229 struct drm_crtc_state *crtc_state) 230 { 231 /* 232 * Xen doesn't initialize vblanking via drm_vblank_init(), so 233 * DRM helpers assume that it doesn't handle vblanking and start 234 * sending out fake VBLANK events automatically. 235 * 236 * As xen contains it's own logic for sending out VBLANK events 237 * in send_pending_event(), disable no_vblank (i.e., the xen 238 * driver has vblanking support). 
	 */
	crtc_state->no_vblank = false;

	return 0;
}

static void display_update(struct drm_simple_display_pipe *pipe,
			   struct drm_plane_state *old_plane_state)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_pending_vblank_event *event;
	int idx;

	event = crtc->state->event;
	if (event) {
		struct drm_device *dev = crtc->dev;
		unsigned long flags;

		WARN_ON(pipeline->pending_event);

		spin_lock_irqsave(&dev->event_lock, flags);
		crtc->state->event = NULL;

		pipeline->pending_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
		send_pending_event(pipeline);
		return;
	}

	/*
	 * Send page flip request to the backend *after* we have event cached
	 * above, so on page flip done event from the backend we can
	 * deliver it and there is no race condition between this code and
	 * event from the backend.
	 * If this is not a page flip, e.g. no flip done event from the backend
	 * is expected, then send now.
	 */
	if (!display_send_page_flip(pipe, old_plane_state))
		send_pending_event(pipeline);

	drm_dev_exit(idx);
}

static enum drm_mode_status
display_mode_valid(struct drm_simple_display_pipe *pipe,
		   const struct drm_display_mode *mode)
{
	struct xen_drm_front_drm_pipeline *pipeline =
			to_xen_drm_pipeline(pipe);

	if (mode->hdisplay != pipeline->width)
		return MODE_ERROR;

	if (mode->vdisplay != pipeline->height)
		return MODE_ERROR;

	return MODE_OK;
}

static const struct drm_simple_display_pipe_funcs display_funcs = {
	.mode_valid = display_mode_valid,
	.enable = display_enable,
	.disable = display_disable,
	.check = display_check,
	.update = display_update,
};

static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
			     int index, struct xen_drm_front_cfg_connector *cfg,
			     struct xen_drm_front_drm_pipeline *pipeline)
{
	struct drm_device *dev = drm_info->drm_dev;
	const u32 *formats;
	int format_count;
	int ret;

	pipeline->drm_info = drm_info;
	pipeline->index = index;
	pipeline->height = cfg->height;
	pipeline->width = cfg->width;

	INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);

	ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
	if (ret)
		return ret;

	formats = xen_drm_front_conn_get_formats(&format_count);

	return drm_simple_display_pipe_init(dev, &pipeline->pipe,
					    &display_funcs, formats,
					    format_count, NULL,
					    &pipeline->conn);
}

int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
{
	struct drm_device *dev = drm_info->drm_dev;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &mode_config_funcs;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_cfg_connector *cfg =
				&drm_info->front_info->cfg.connectors[i];
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		ret = display_pipe_init(drm_info, i, cfg, pipeline);
		if (ret) {
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	drm_mode_config_reset(dev);
	drm_kms_helper_poll_init(dev);
	return 0;
}

void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
{
	int i;

	for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
		struct xen_drm_front_drm_pipeline *pipeline =
				&drm_info->pipeline[i];

		cancel_delayed_work_sync(&pipeline->pflip_to_worker);

		send_pending_event(pipeline);
	}
}
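
/*
 * Usage sketch (illustrative only, not part of this file): the
 * xen_drm_front_kms_init()/xen_drm_front_kms_fini() entry points above
 * are expected to be paired by the front driver core around the lifetime
 * of the DRM device. A minimal sketch, assuming hypothetical
 * front_drv_init()/front_drv_fini() helpers in the driver core, with
 * error handling and unrelated setup omitted:
 *
 *	static int front_drv_init(struct xen_drm_front_drm_info *drm_info)
 *	{
 *		int ret;
 *
 *		ret = xen_drm_front_kms_init(drm_info);
 *		if (ret)
 *			DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
 *		return ret;
 *	}
 *
 *	static void front_drv_fini(struct xen_drm_front_drm_info *drm_info)
 *	{
 *		xen_drm_front_kms_fini(drm_info);
 *	}
 */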