xref: /linux/drivers/gpu/drm/i915/display/intel_fbdev.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>

#include "i915_drv.h"
#include "intel_bo.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "intel_fbdev_fb.h"
#include "intel_frontbuffer.h"

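/*
 * Per-device fbdev emulation state: the generic fb helper, the framebuffer
 * backing the console, the GGTT vma it is pinned through, and bookkeeping
 * for deferring hotplug (HPD) handling while fbdev is suspended.
 */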
struct intel_fbdev {
	struct drm_fb_helper helper;
	struct intel_framebuffer *fb;
	struct i915_vma *vma;
	unsigned long vma_flags;
	int preferred_bpp;

	/* Whether or not fbdev hpd processing is temporarily suspended */
	bool hpd_suspended: 1;
	/* Set when a hotplug was received while HPD processing was suspended */
	bool hpd_waiting: 1;

	/* Protects hpd_suspended */
	struct mutex hpd_lock;
};

static struct intel_fbdev *to_intel_fbdev(struct drm_fb_helper *fb_helper)
{
	return container_of(fb_helper, struct intel_fbdev, helper);
}

static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
{
	return ifbdev->fb->frontbuffer;
}

static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
	intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
}

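/*
 * Generate the deferred-I/O read/write and drawing fb ops for a framebuffer
 * living in I/O memory; CPU accesses are reported to the DRM fb helper as
 * damage ranges/areas.
 */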
FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(intel_fbdev,
				  drm_fb_helper_damage_range,
				  drm_fb_helper_damage_area)

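/*
 * The set_par/blank/pan_display hooks wrap the generic fb helper
 * implementations and additionally invalidate the frontbuffer, so that
 * frontbuffer tracking consumers (e.g. PSR, FBC) notice the upcoming CPU
 * rendering to the console.
 */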
static int intel_fbdev_set_par(struct fb_info *info)
{
	struct intel_fbdev *ifbdev = to_intel_fbdev(info->par);
	int ret;

	ret = drm_fb_helper_set_par(info);
	if (ret == 0)
		intel_fbdev_invalidate(ifbdev);

	return ret;
}

static int intel_fbdev_blank(int blank, struct fb_info *info)
{
	struct intel_fbdev *ifbdev = to_intel_fbdev(info->par);
	int ret;

	ret = drm_fb_helper_blank(blank, info);
	if (ret == 0)
		intel_fbdev_invalidate(ifbdev);

	return ret;
}

static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
				   struct fb_info *info)
{
	struct intel_fbdev *ifbdev = to_intel_fbdev(info->par);
	int ret;

	ret = drm_fb_helper_pan_display(var, info);
	if (ret == 0)
		intel_fbdev_invalidate(ifbdev);

	return ret;
}

static int intel_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct intel_fbdev *fbdev = to_intel_fbdev(info->par);
	struct drm_gem_object *obj = drm_gem_fb_get_obj(&fbdev->fb->base, 0);

	return intel_bo_fb_mmap(obj, vma);
}

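/*
 * Final teardown, called when the last reference to the fb_info is dropped:
 * finalize the fb helper, unpin the GGTT vma, drop the framebuffer and free
 * the intel_fbdev itself.
 */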
static void intel_fbdev_fb_destroy(struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct intel_fbdev *ifbdev = container_of(fb_helper, struct intel_fbdev, helper);

	drm_fb_helper_fini(&ifbdev->helper);

	/*
	 * We rely on the object-free to release the VMA pinning for
	 * the info->screen_base mmapping. Leaking the VMA is simpler than
	 * trying to rectify all the possible error paths leading here.
	 */
	intel_fb_unpin_vma(ifbdev->vma, ifbdev->vma_flags);
	drm_framebuffer_remove(&ifbdev->fb->base);

	drm_client_release(&fb_helper->client);
	drm_fb_helper_unprepare(&ifbdev->helper);
	kfree(ifbdev);
}

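/*
 * The default-ops macros and the explicit .fb_* assignments below
 * intentionally initialize some of the same fields, hence -Woverride-init is
 * suppressed around this initializer.
 */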
__diag_push();
__diag_ignore_all("-Woverride-init", "Allow field initialization overrides for fb ops");

static const struct fb_ops intelfb_ops = {
	.owner = THIS_MODULE,
	__FB_DEFAULT_DEFERRED_OPS_RDWR(intel_fbdev),
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_set_par = intel_fbdev_set_par,
	.fb_blank = intel_fbdev_blank,
	.fb_pan_display = intel_fbdev_pan_display,
	__FB_DEFAULT_DEFERRED_OPS_DRAW(intel_fbdev),
	.fb_mmap = intel_fbdev_mmap,
	.fb_destroy = intel_fbdev_fb_destroy,
};

__diag_pop();

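/*
 * .fb_probe callback: back the fbdev console with a framebuffer, reusing the
 * BIOS framebuffer read out at boot when it is big enough, otherwise
 * allocating a fresh one; pin it into the GGTT and fill in the fb_info.
 */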
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
	struct intel_framebuffer *fb = ifbdev->fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct i915_gtt_view view = {
		.type = I915_GTT_VIEW_NORMAL,
	};
	intel_wakeref_t wakeref;
	struct fb_info *info;
	struct i915_vma *vma;
	unsigned long flags = 0;
	bool prealloc = false;
	struct drm_gem_object *obj;
	int ret;

	mutex_lock(&ifbdev->hpd_lock);
	ret = ifbdev->hpd_suspended ? -EAGAIN : 0;
	mutex_unlock(&ifbdev->hpd_lock);
	if (ret)
		return ret;

	ifbdev->fb = NULL;

	if (fb &&
	    (sizes->fb_width > fb->base.width ||
	     sizes->fb_height > fb->base.height)) {
		drm_dbg_kms(&dev_priv->drm,
			    "BIOS fb too small (%dx%d), we require (%dx%d),"
			    " releasing it\n",
			    fb->base.width, fb->base.height,
			    sizes->fb_width, sizes->fb_height);
		drm_framebuffer_put(&fb->base);
		fb = NULL;
	}
	if (!fb || drm_WARN_ON(dev, !intel_fb_bo(&fb->base))) {
		drm_dbg_kms(&dev_priv->drm,
			    "no BIOS fb, allocating a new one\n");
		fb = intel_fbdev_fb_alloc(helper, sizes);
		if (IS_ERR(fb))
			return PTR_ERR(fb);
	} else {
		drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
		prealloc = true;
		sizes->fb_width = fb->base.width;
		sizes->fb_height = fb->base.height;
	}

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	/* Pin the GGTT vma for our access via info->screen_base.
	 * This also validates that any existing fb inherited from the
	 * BIOS is suitable for our own access.
	 */
	vma = intel_fb_pin_to_ggtt(&fb->base, &view,
				   fb->min_alignment, 0,
				   false, &flags);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_unlock;
	}

	info = drm_fb_helper_alloc_info(helper);
	if (IS_ERR(info)) {
		drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
		ret = PTR_ERR(info);
		goto out_unpin;
	}

	ifbdev->helper.fb = &fb->base;

	info->fbops = &intelfb_ops;

	obj = intel_fb_bo(&fb->base);

	ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
	if (ret)
		goto out_unpin;

	drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (!intel_bo_is_shmem(obj) && !prealloc)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	drm_dbg_kms(&dev_priv->drm, "allocated %dx%d fb: 0x%08x\n",
		    fb->base.width, fb->base.height,
		    i915_ggtt_offset(vma));
	ifbdev->fb = fb;
	ifbdev->vma = vma;
	ifbdev->vma_flags = flags;

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;

out_unpin:
	intel_fb_unpin_vma(vma, flags);
out_unlock:
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}

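/*
 * .fb_dirty callback: forward non-empty damage rectangles from the fb helper
 * to the framebuffer's ->dirty() hook, if the framebuffer provides one.
 */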
static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
{
	if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
		return 0;

	if (helper->fb->funcs->dirty)
		return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);

	return 0;
}

static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
	.fb_probe = intelfb_create,
	.fb_dirty = intelfb_dirty,
};

/*
 * Build an intel_fbdev struct using a BIOS allocated framebuffer, if possible.
 * The core display code will have read out the current plane configuration,
 * so we use that to figure out if there's an object for us to use as the
 * fb, and if so, we re-use it for the fbdev configuration.
 *
 * Note we only support a single fb shared across pipes for boot (mostly for
 * fbcon), so we just find the biggest and use that.
 */
static bool intel_fbdev_init_bios(struct drm_device *dev,
				  struct intel_fbdev *ifbdev)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct intel_framebuffer *fb = NULL;
	struct intel_crtc *crtc;
	unsigned int max_size = 0;

	/* Find the largest fb */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct drm_gem_object *obj = intel_fb_bo(plane_state->uapi.fb);

		if (!crtc_state->uapi.active) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] not active, skipping\n",
				    crtc->base.base.id, crtc->base.name);
			continue;
		}

		if (!obj) {
			drm_dbg_kms(&i915->drm,
				    "[PLANE:%d:%s] no fb, skipping\n",
				    plane->base.base.id, plane->base.name);
			continue;
		}

		if (obj->size > max_size) {
			drm_dbg_kms(&i915->drm,
				    "found possible fb from [PLANE:%d:%s]\n",
				    plane->base.base.id, plane->base.name);
			fb = to_intel_framebuffer(plane_state->uapi.fb);
			max_size = obj->size;
		}
	}

	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "no active fbs found, not using BIOS config\n");
		goto out;
	}

	/* Now make sure all the pipes will fit into it */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		unsigned int cur_size;

		if (!crtc_state->uapi.active) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] not active, skipping\n",
				    crtc->base.base.id, crtc->base.name);
			continue;
		}

		drm_dbg_kms(&i915->drm, "checking [PLANE:%d:%s] for BIOS fb\n",
			    plane->base.base.id, plane->base.name);

		/*
		 * See if the plane fb we found above will fit on this
		 * pipe.  Note we need to use the selected fb's pitch and bpp
		 * rather than the current pipe's, since they differ.
		 */
		cur_size = crtc_state->uapi.adjusted_mode.crtc_hdisplay;
		cur_size = cur_size * fb->base.format->cpp[0];
		if (fb->base.pitches[0] < cur_size) {
			drm_dbg_kms(&i915->drm,
				    "fb not wide enough for [PLANE:%d:%s] (%d vs %d)\n",
				    plane->base.base.id, plane->base.name,
				    cur_size, fb->base.pitches[0]);
			fb = NULL;
			break;
		}

		cur_size = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
		cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
		cur_size *= fb->base.pitches[0];
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] area: %dx%d, bpp: %d, size: %d\n",
			    crtc->base.base.id, crtc->base.name,
			    crtc_state->uapi.adjusted_mode.crtc_hdisplay,
			    crtc_state->uapi.adjusted_mode.crtc_vdisplay,
			    fb->base.format->cpp[0] * 8,
			    cur_size);

		if (cur_size > max_size) {
			drm_dbg_kms(&i915->drm,
				    "fb not big enough for [PLANE:%d:%s] (%d vs %d)\n",
				    plane->base.base.id, plane->base.name,
				    cur_size, max_size);
			fb = NULL;
			break;
		}

		drm_dbg_kms(&i915->drm,
			    "fb big enough [PLANE:%d:%s] (%d >= %d)\n",
			    plane->base.base.id, plane->base.name,
			    max_size, cur_size);
	}

	if (!fb) {
		drm_dbg_kms(&i915->drm,
			    "BIOS fb not suitable for all pipes, not using\n");
		goto out;
	}

	ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
	ifbdev->fb = fb;

	drm_framebuffer_get(&ifbdev->fb->base);

	/* Final pass to check if any active pipes don't have fbs */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (!crtc_state->uapi.active)
			continue;

		drm_WARN(dev, !plane_state->uapi.fb,
			 "re-used BIOS config but lost an fb on [PLANE:%d:%s]\n",
			 plane->base.base.id, plane->base.name);
	}

	drm_dbg_kms(&i915->drm, "using BIOS fb for initial console\n");
	return true;

out:

	return false;
}

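/*
 * Deferred resume: queued when console_trylock() fails in
 * intel_fbdev_set_suspend(); re-runs the resume synchronously from the
 * workqueue, where taking the console lock is acceptable.
 */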
static void intel_fbdev_suspend_worker(struct work_struct *work)
{
	intel_fbdev_set_suspend(&container_of(work,
					      struct drm_i915_private,
					      display.fbdev.suspend_work)->drm,
				FBINFO_STATE_RUNNING,
				true);
}

/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
 * processing, fbdev will perform a full connector reprobe if a hotplug event
 * was received while HPD was suspended.
 */
static void intel_fbdev_hpd_set_suspend(struct drm_i915_private *i915, int state)
{
	struct intel_fbdev *ifbdev = i915->display.fbdev.fbdev;
	bool send_hpd = false;

	mutex_lock(&ifbdev->hpd_lock);
	ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
	send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
	ifbdev->hpd_waiting = false;
	mutex_unlock(&ifbdev->hpd_lock);

	if (send_hpd) {
		drm_dbg_kms(&i915->drm, "Handling delayed fbcon HPD event\n");
		drm_fb_helper_hotplug_event(&ifbdev->helper);
	}
}

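/*
 * Suspend or resume the fbdev console. On suspend (and driver unload) this
 * must be synchronous, so outstanding resume work is flushed and the console
 * lock is taken unconditionally. On resume the console lock is only tried,
 * and the update is punted to intel_fbdev_suspend_worker() when it is
 * contended, to keep printk traffic out of the resume hot path.
 */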
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
	struct fb_info *info;

	if (!ifbdev)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
		return;

	if (!ifbdev->vma)
		goto set_suspend;

	info = ifbdev->helper.info;

	if (synchronous) {
		/* Flush any pending work to turn the console on, and then
		 * wait to turn it off. It must be synchronous as we are
		 * about to suspend or unload the driver.
		 *
		 * Note that from within the work-handler, we cannot flush
		 * ourselves, so only flush outstanding work upon suspend!
		 */
		if (state != FBINFO_STATE_RUNNING)
			flush_work(&dev_priv->display.fbdev.suspend_work);

		console_lock();
	} else {
		/*
		 * The console lock can be pretty contended on resume due
		 * to all the printk activity.  Try to keep it out of the hot
		 * path of resume if possible.
		 */
		drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
		if (!console_trylock()) {
			/* Don't block our own workqueue as this can
			 * be run in parallel with other i915.ko tasks.
			 */
			queue_work(dev_priv->unordered_wq,
				   &dev_priv->display.fbdev.suspend_work);
			return;
		}
	}

	/* On resume from hibernation: If the object is shmemfs backed, it has
	 * been restored from swap. If the object is stolen however, it will be
	 * full of whatever garbage was left in there.
	 */
	if (state == FBINFO_STATE_RUNNING &&
	    !intel_bo_is_shmem(intel_fb_bo(&ifbdev->fb->base)))
		memset_io(info->screen_base, 0, info->screen_size);

	drm_fb_helper_set_suspend(&ifbdev->helper, state);
	console_unlock();

set_suspend:
	intel_fbdev_hpd_set_suspend(dev_priv, state);
}

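/*
 * Hotplug notification from the display core: forward it to the fb helper
 * when HPD processing is active; a hotplug that arrives while HPD is
 * suspended is flagged via hpd_waiting and replayed once processing resumes.
 */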
static int intel_fbdev_output_poll_changed(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev;
	bool send_hpd;

	if (!ifbdev)
		return -EINVAL;

	mutex_lock(&ifbdev->hpd_lock);
	send_hpd = !ifbdev->hpd_suspended;
	ifbdev->hpd_waiting = true;
	mutex_unlock(&ifbdev->hpd_lock);

	if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
		drm_fb_helper_hotplug_event(&ifbdev->helper);

	return 0;
}

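/*
 * Switch the display back to the fbdev configuration (used from the DRM
 * client ->restore() hook, e.g. on lastclose) and invalidate the frontbuffer
 * afterwards so tracking consumers notice the console output.
 */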
static int intel_fbdev_restore_mode(struct drm_i915_private *dev_priv)
{
	struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
	int ret;

	if (!ifbdev)
		return -EINVAL;

	if (!ifbdev->vma)
		return -ENOMEM;

	ret = drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper);
	if (ret)
		return ret;

	intel_fbdev_invalidate(ifbdev);

	return 0;
}

/*
 * Fbdev client and struct drm_client_funcs
 */

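/*
 * Client ->unregister(): if the fbdev was fully set up, unregister the
 * fb_info (final teardown then happens in intel_fbdev_fb_destroy() once the
 * last reference is dropped); otherwise release the half-initialized helper
 * directly.
 */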
static void intel_fbdev_client_unregister(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = fb_helper->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (fb_helper->info) {
		vga_switcheroo_client_fb_set(pdev, NULL);
		drm_fb_helper_unregister_info(fb_helper);
	} else {
		drm_fb_helper_unprepare(fb_helper);
		drm_client_release(&fb_helper->client);
		kfree(fb_helper);
	}
}

static int intel_fbdev_client_restore(struct drm_client_dev *client)
{
	struct drm_i915_private *dev_priv = to_i915(client->dev);
	int ret;

	ret = intel_fbdev_restore_mode(dev_priv);
	if (ret)
		return ret;

	vga_switcheroo_process_delayed_switch();

	return 0;
}

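/*
 * Client ->hotplug(): the first invocation performs the deferred fbdev setup
 * (fb helper init plus initial configuration); later invocations are routed
 * through intel_fbdev_output_poll_changed() so suspended HPD handling is
 * honoured.
 */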
static int intel_fbdev_client_hotplug(struct drm_client_dev *client)
{
	struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
	struct drm_device *dev = client->dev;
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	int ret;

	if (dev->fb_helper)
		return intel_fbdev_output_poll_changed(dev);

	ret = drm_fb_helper_init(dev, fb_helper);
	if (ret)
		goto err_drm_err;

	ret = drm_fb_helper_initial_config(fb_helper);
	if (ret)
		goto err_drm_fb_helper_fini;

	vga_switcheroo_client_fb_set(pdev, fb_helper->info);

	return 0;

err_drm_fb_helper_fini:
	drm_fb_helper_fini(fb_helper);
err_drm_err:
	drm_err(dev, "Failed to setup i915 fbdev emulation (ret=%d)\n", ret);
	return ret;
}

static const struct drm_client_funcs intel_fbdev_client_funcs = {
	.owner		= THIS_MODULE,
	.unregister	= intel_fbdev_client_unregister,
	.restore	= intel_fbdev_client_restore,
	.hotplug	= intel_fbdev_client_hotplug,
};

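/*
 * Set up fbdev emulation for the device: allocate the intel_fbdev, seed it
 * from the BIOS framebuffer where possible, and register the DRM client
 * whose ->hotplug() callback completes the setup later on.
 */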
void intel_fbdev_setup(struct drm_i915_private *i915)
{
	struct drm_device *dev = &i915->drm;
	struct intel_fbdev *ifbdev;
	int ret;

	if (!HAS_DISPLAY(i915))
		return;

	ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
	if (!ifbdev)
		return;
	drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);

	i915->display.fbdev.fbdev = ifbdev;
	INIT_WORK(&i915->display.fbdev.suspend_work, intel_fbdev_suspend_worker);
	mutex_init(&ifbdev->hpd_lock);
	if (intel_fbdev_init_bios(dev, ifbdev))
		ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
	else
		ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;

	ret = drm_client_init(dev, &ifbdev->helper.client, "intel-fbdev",
			      &intel_fbdev_client_funcs);
	if (ret) {
		drm_err(dev, "Failed to register client: %d\n", ret);
		goto err_drm_fb_helper_unprepare;
	}

	drm_client_register(&ifbdev->helper.client);

	return;

err_drm_fb_helper_unprepare:
	drm_fb_helper_unprepare(&ifbdev->helper);
	mutex_destroy(&ifbdev->hpd_lock);
	kfree(ifbdev);
}

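/* Return the framebuffer currently driving fbdev, or NULL if there is none. */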
struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev)
{
	if (!fbdev || !fbdev->helper.fb)
		return NULL;

	return to_intel_framebuffer(fbdev->helper.fb);
}