xref: /linux/drivers/video/fbdev/hyperv_fb.c (revision 79997eda0d31bc68203c95ecb978773ee6ce7a1f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012, Microsoft Corporation.
4  *
5  * Author:
6  *   Haiyang Zhang <haiyangz@microsoft.com>
7  */
8 
9 /*
10  * Hyper-V Synthetic Video Frame Buffer Driver
11  *
12  * This is the driver for the Hyper-V Synthetic Video, which supports
13  * screen resolutions up to Full HD 1920x1080 with 32-bit color on Windows
14  * Server 2012, and 1600x1200 with 16-bit color on Windows Server 2008 R2
15  * or earlier.
16  *
17  * It also solves the double mouse cursor issue of the emulated video mode.
18  *
19  * The default screen resolution is 1152x864, which may be changed by a
20  * kernel parameter:
21  *     video=hyperv_fb:<width>x<height>
22  *     For example: video=hyperv_fb:1280x1024
23  *
24  * Portrait orientation is also supported:
25  *     For example: video=hyperv_fb:864x1152
26  *
27  * When a Windows 10 RS5+ host is used, the virtual machine screen
28  * resolution is obtained from the host. The "video=hyperv_fb" option is
29  * not needed, but can still be used to override what the host specifies.
30  * The VM resolution on the host can be set by running the PowerShell
31  * "set-vmvideo" command. For example:
32  *     set-vmvideo -vmname name -horizontalresolution:1920 \
33  * -verticalresolution:1200 -resolutiontype single
34  *
35  * Gen 1 VMs also support using the VM's physical memory directly for the
36  * framebuffer, which can improve efficiency and performance for both the
37  * framebuffer and the VM. This requires allocating contiguous physical
38  * memory from the Linux kernel's CMA allocator. To enable this, supply a
39  * kernel parameter that reserves enough memory for the CMA allocator, e.g.:
40  *    cma=130m
41  * This gives the CMA allocator 130 MB of memory that can be allocated to
42  * the framebuffer. For reference, an 8K (7680x4320) framebuffer takes
43  * about 127 MB.
44  */
45 
46 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
47 
48 #include <linux/aperture.h>
49 #include <linux/module.h>
50 #include <linux/kernel.h>
51 #include <linux/screen_info.h>
52 #include <linux/vmalloc.h>
53 #include <linux/init.h>
54 #include <linux/completion.h>
55 #include <linux/fb.h>
56 #include <linux/pci.h>
57 #include <linux/panic_notifier.h>
58 #include <linux/efi.h>
59 #include <linux/console.h>
60 
61 #include <linux/hyperv.h>
62 
63 /* Hyper-V Synthetic Video Protocol definitions and structures */
64 #define MAX_VMBUS_PKT_SIZE 0x4000
65 
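/*
 * A protocol version packs the major number into the low 16 bits and the
 * minor number into the high 16 bits; see the GET_MAJOR/GET_MINOR helpers
 * below.
 */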
66 #define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
67 /* Support for VERSION_WIN7 is removed. #define is retained for reference. */
68 #define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
69 #define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
70 #define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
71 
72 #define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
73 #define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
74 
75 #define SYNTHVID_DEPTH_WIN8 32
76 #define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
77 
78 enum pipe_msg_type {
79 	PIPE_MSG_INVALID,
80 	PIPE_MSG_DATA,
81 	PIPE_MSG_MAX
82 };
83 
84 struct pipe_msg_hdr {
85 	u32 type;
86 	u32 size; /* size of message after this field */
87 } __packed;
88 
89 
90 enum synthvid_msg_type {
91 	SYNTHVID_ERROR			= 0,
92 	SYNTHVID_VERSION_REQUEST	= 1,
93 	SYNTHVID_VERSION_RESPONSE	= 2,
94 	SYNTHVID_VRAM_LOCATION		= 3,
95 	SYNTHVID_VRAM_LOCATION_ACK	= 4,
96 	SYNTHVID_SITUATION_UPDATE	= 5,
97 	SYNTHVID_SITUATION_UPDATE_ACK	= 6,
98 	SYNTHVID_POINTER_POSITION	= 7,
99 	SYNTHVID_POINTER_SHAPE		= 8,
100 	SYNTHVID_FEATURE_CHANGE		= 9,
101 	SYNTHVID_DIRT			= 10,
102 	SYNTHVID_RESOLUTION_REQUEST	= 13,
103 	SYNTHVID_RESOLUTION_RESPONSE	= 14,
104 
105 	SYNTHVID_MAX			= 15
106 };
107 
108 #define		SYNTHVID_EDID_BLOCK_SIZE	128
109 #define		SYNTHVID_MAX_RESOLUTION_COUNT	64
110 
111 struct hvd_screen_info {
112 	u16 width;
113 	u16 height;
114 } __packed;
115 
116 struct synthvid_msg_hdr {
117 	u32 type;
118 	u32 size;  /* size of this header + payload after this field */
119 } __packed;
120 
121 struct synthvid_version_req {
122 	u32 version;
123 } __packed;
124 
125 struct synthvid_version_resp {
126 	u32 version;
127 	u8 is_accepted;
128 	u8 max_video_outputs;
129 } __packed;
130 
131 struct synthvid_supported_resolution_req {
132 	u8 maximum_resolution_count;
133 } __packed;
134 
135 struct synthvid_supported_resolution_resp {
136 	u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
137 	u8 resolution_count;
138 	u8 default_resolution_index;
139 	u8 is_standard;
140 	struct hvd_screen_info
141 		supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
142 } __packed;
143 
144 struct synthvid_vram_location {
145 	u64 user_ctx;
146 	u8 is_vram_gpa_specified;
147 	u64 vram_gpa;
148 } __packed;
149 
150 struct synthvid_vram_location_ack {
151 	u64 user_ctx;
152 } __packed;
153 
154 struct video_output_situation {
155 	u8 active;
156 	u32 vram_offset;
157 	u8 depth_bits;
158 	u32 width_pixels;
159 	u32 height_pixels;
160 	u32 pitch_bytes;
161 } __packed;
162 
163 struct synthvid_situation_update {
164 	u64 user_ctx;
165 	u8 video_output_count;
166 	struct video_output_situation video_output[1];
167 } __packed;
168 
169 struct synthvid_situation_update_ack {
170 	u64 user_ctx;
171 } __packed;
172 
173 struct synthvid_pointer_position {
174 	u8 is_visible;
175 	u8 video_output;
176 	s32 image_x;
177 	s32 image_y;
178 } __packed;
179 
180 
181 #define CURSOR_MAX_X 96
182 #define CURSOR_MAX_Y 96
183 #define CURSOR_ARGB_PIXEL_SIZE 4
184 #define CURSOR_MAX_SIZE (CURSOR_MAX_X * CURSOR_MAX_Y * CURSOR_ARGB_PIXEL_SIZE)
185 #define CURSOR_COMPLETE (-1)
186 
187 struct synthvid_pointer_shape {
188 	u8 part_idx;
189 	u8 is_argb;
190 	u32 width; /* CURSOR_MAX_X at most */
191 	u32 height; /* CURSOR_MAX_Y at most */
192 	u32 hot_x; /* hotspot relative to upper-left of pointer image */
193 	u32 hot_y;
194 	u8 data[4];
195 } __packed;
196 
197 struct synthvid_feature_change {
198 	u8 is_dirt_needed;
199 	u8 is_ptr_pos_needed;
200 	u8 is_ptr_shape_needed;
201 	u8 is_situ_needed;
202 } __packed;
203 
204 struct rect {
205 	s32 x1, y1; /* top left corner */
206 	s32 x2, y2; /* bottom right corner, exclusive */
207 } __packed;
208 
209 struct synthvid_dirt {
210 	u8 video_output;
211 	u8 dirt_count;
212 	struct rect rect[1];
213 } __packed;
214 
215 struct synthvid_msg {
216 	struct pipe_msg_hdr pipe_hdr;
217 	struct synthvid_msg_hdr vid_hdr;
218 	union {
219 		struct synthvid_version_req ver_req;
220 		struct synthvid_version_resp ver_resp;
221 		struct synthvid_vram_location vram;
222 		struct synthvid_vram_location_ack vram_ack;
223 		struct synthvid_situation_update situ;
224 		struct synthvid_situation_update_ack situ_ack;
225 		struct synthvid_pointer_position ptr_pos;
226 		struct synthvid_pointer_shape ptr_shape;
227 		struct synthvid_feature_change feature_chg;
228 		struct synthvid_dirt dirt;
229 		struct synthvid_supported_resolution_req resolution_req;
230 		struct synthvid_supported_resolution_resp resolution_resp;
231 	};
232 } __packed;
233 
234 
235 /* FB driver definitions and structures */
236 #define HVFB_WIDTH 1152 /* default screen width */
237 #define HVFB_HEIGHT 864 /* default screen height */
238 #define HVFB_WIDTH_MIN 640
239 #define HVFB_HEIGHT_MIN 480
240 
241 #define RING_BUFSIZE (256 * 1024)
242 #define VSP_TIMEOUT (10 * HZ)
243 #define HVFB_UPDATE_DELAY (HZ / 20)
244 #define HVFB_ONDEMAND_THROTTLE (HZ / 20)
245 
246 struct hvfb_par {
247 	struct fb_info *info;
248 	struct resource *mem;
249 	bool fb_ready; /* fb device is ready */
250 	struct completion wait;
251 	u32 synthvid_version;
252 
253 	struct delayed_work dwork;
254 	bool update;
255 	bool update_saved; /* The value of 'update' before hibernation */
256 
257 	u32 pseudo_palette[16];
258 	u8 init_buf[MAX_VMBUS_PKT_SIZE];
259 	u8 recv_buf[MAX_VMBUS_PKT_SIZE];
260 
261 	/* If true, the VSC notifies the VSP on every framebuffer change */
262 	bool synchronous_fb;
263 
264 	/* If true, need to copy from deferred IO mem to framebuffer mem */
265 	bool need_docopy;
266 
267 	struct notifier_block hvfb_panic_nb;
268 
269 	/* Memory for deferred IO and frame buffer itself */
270 	unsigned char *dio_vp;
271 	unsigned char *mmio_vp;
272 	phys_addr_t mmio_pp;
273 
274 	/* Dirty rectangle, protected by delayed_refresh_lock */
275 	int x1, y1, x2, y2;
276 	bool delayed_refresh;
277 	spinlock_t delayed_refresh_lock;
278 };
279 
280 static uint screen_width = HVFB_WIDTH;
281 static uint screen_height = HVFB_HEIGHT;
282 static uint screen_depth;
283 static uint screen_fb_size;
284 static uint dio_fb_size; /* FB size for deferred IO */
285 
286 /* Send message to Hyper-V host */
287 static inline int synthvid_send(struct hv_device *hdev,
288 				struct synthvid_msg *msg)
289 {
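	/* Use a monotonically increasing value as the VMBus request id of each packet */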
290 	static atomic64_t request_id = ATOMIC64_INIT(0);
291 	int ret;
292 
293 	msg->pipe_hdr.type = PIPE_MSG_DATA;
294 	msg->pipe_hdr.size = msg->vid_hdr.size;
295 
296 	ret = vmbus_sendpacket(hdev->channel, msg,
297 			       msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
298 			       atomic64_inc_return(&request_id),
299 			       VM_PKT_DATA_INBAND, 0);
300 
301 	if (ret)
302 		pr_err_ratelimited("Unable to send packet via vmbus; error %d\n", ret);
303 
304 	return ret;
305 }
306 
307 
308 /* Send screen resolution info to host */
309 static int synthvid_send_situ(struct hv_device *hdev)
310 {
311 	struct fb_info *info = hv_get_drvdata(hdev);
312 	struct synthvid_msg msg;
313 
314 	if (!info)
315 		return -ENODEV;
316 
317 	memset(&msg, 0, sizeof(struct synthvid_msg));
318 
319 	msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
320 	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
321 		sizeof(struct synthvid_situation_update);
322 	msg.situ.user_ctx = 0;
323 	msg.situ.video_output_count = 1;
324 	msg.situ.video_output[0].active = 1;
325 	msg.situ.video_output[0].vram_offset = 0;
326 	msg.situ.video_output[0].depth_bits = info->var.bits_per_pixel;
327 	msg.situ.video_output[0].width_pixels = info->var.xres;
328 	msg.situ.video_output[0].height_pixels = info->var.yres;
329 	msg.situ.video_output[0].pitch_bytes = info->fix.line_length;
330 
331 	synthvid_send(hdev, &msg);
332 
333 	return 0;
334 }
335 
336 /* Send mouse pointer info to host */
337 static int synthvid_send_ptr(struct hv_device *hdev)
338 {
339 	struct synthvid_msg msg;
340 
341 	memset(&msg, 0, sizeof(struct synthvid_msg));
342 	msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
343 	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
344 		sizeof(struct synthvid_pointer_position);
345 	msg.ptr_pos.is_visible = 1;
346 	msg.ptr_pos.video_output = 0;
347 	msg.ptr_pos.image_x = 0;
348 	msg.ptr_pos.image_y = 0;
349 	synthvid_send(hdev, &msg);
350 
351 	memset(&msg, 0, sizeof(struct synthvid_msg));
352 	msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
353 	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
354 		sizeof(struct synthvid_pointer_shape);
355 	msg.ptr_shape.part_idx = CURSOR_COMPLETE;
356 	msg.ptr_shape.is_argb = 1;
357 	msg.ptr_shape.width = 1;
358 	msg.ptr_shape.height = 1;
359 	msg.ptr_shape.hot_x = 0;
360 	msg.ptr_shape.hot_y = 0;
361 	msg.ptr_shape.data[0] = 0;
362 	msg.ptr_shape.data[1] = 1;
363 	msg.ptr_shape.data[2] = 1;
364 	msg.ptr_shape.data[3] = 1;
365 	synthvid_send(hdev, &msg);
366 
367 	return 0;
368 }
369 
370 /* Send updated screen area (dirty rectangle) location to host */
371 static int
372 synthvid_update(struct fb_info *info, int x1, int y1, int x2, int y2)
373 {
374 	struct hv_device *hdev = device_to_hv_device(info->device);
375 	struct synthvid_msg msg;
376 
377 	memset(&msg, 0, sizeof(struct synthvid_msg));
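	/* Callers pass INT_MAX to request an update of the full screen extent */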
378 	if (x2 == INT_MAX)
379 		x2 = info->var.xres;
380 	if (y2 == INT_MAX)
381 		y2 = info->var.yres;
382 
383 	msg.vid_hdr.type = SYNTHVID_DIRT;
384 	msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
385 		sizeof(struct synthvid_dirt);
386 	msg.dirt.video_output = 0;
387 	msg.dirt.dirt_count = 1;
388 	msg.dirt.rect[0].x1 = (x1 > x2) ? 0 : x1;
389 	msg.dirt.rect[0].y1 = (y1 > y2) ? 0 : y1;
390 	msg.dirt.rect[0].x2 =
391 		(x2 < x1 || x2 > info->var.xres) ? info->var.xres : x2;
392 	msg.dirt.rect[0].y2 =
393 		(y2 < y1 || y2 > info->var.yres) ? info->var.yres : y2;
394 
395 	synthvid_send(hdev, &msg);
396 
397 	return 0;
398 }
399 
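/*
 * Copy a dirty byte range from the deferred IO shadow buffer (dio_vp) to
 * the real framebuffer memory (mmio_vp), clamping the range to dio_fb_size.
 */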
400 static void hvfb_docopy(struct hvfb_par *par,
401 			unsigned long offset,
402 			unsigned long size)
403 {
404 	if (!par || !par->mmio_vp || !par->dio_vp || !par->fb_ready ||
405 	    size == 0 || offset >= dio_fb_size)
406 		return;
407 
408 	if (offset + size > dio_fb_size)
409 		size = dio_fb_size - offset;
410 
411 	memcpy(par->mmio_vp + offset, par->dio_vp + offset, size);
412 }
413 
414 /* Deferred IO callback */
415 static void synthvid_deferred_io(struct fb_info *p, struct list_head *pagereflist)
416 {
417 	struct hvfb_par *par = p->par;
418 	struct fb_deferred_io_pageref *pageref;
419 	unsigned long start, end;
420 	int y1, y2, miny, maxy;
421 
422 	miny = INT_MAX;
423 	maxy = 0;
424 
425 	/*
426 	 * Merge dirty pages. It is possible that the last page crosses
427 	 * over the end of the frame buffer row yres. This is taken care of
428 	 * in the synthvid_update() function by clamping the y2
429 	 * value to yres.
430 	 */
431 	list_for_each_entry(pageref, pagereflist, list) {
432 		start = pageref->offset;
433 		end = start + PAGE_SIZE - 1;
434 		y1 = start / p->fix.line_length;
435 		y2 = end / p->fix.line_length;
436 		miny = min_t(int, miny, y1);
437 		maxy = max_t(int, maxy, y2);
438 
439 		/* Copy from dio space to mmio address */
440 		if (par->fb_ready && par->need_docopy)
441 			hvfb_docopy(par, start, PAGE_SIZE);
442 	}
443 
444 	if (par->fb_ready && par->update)
445 		synthvid_update(p, 0, miny, p->var.xres, maxy + 1);
446 }
447 
448 static struct fb_deferred_io synthvid_defio = {
449 	.delay		= HZ / 20,
450 	.deferred_io	= synthvid_deferred_io,
451 };
452 
453 /*
454  * Handle a message received from the host:
455  * either complete the wait event,
456  * or reply with screen and cursor info.
457  */
458 static void synthvid_recv_sub(struct hv_device *hdev)
459 {
460 	struct fb_info *info = hv_get_drvdata(hdev);
461 	struct hvfb_par *par;
462 	struct synthvid_msg *msg;
463 
464 	if (!info)
465 		return;
466 
467 	par = info->par;
468 	msg = (struct synthvid_msg *)par->recv_buf;
469 
470 	/* Complete the wait event */
471 	if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
472 	    msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
473 	    msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
474 		memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE);
475 		complete(&par->wait);
476 		return;
477 	}
478 
479 	/* Reply with screen and cursor info */
480 	if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
481 		if (par->fb_ready) {
482 			synthvid_send_ptr(hdev);
483 			synthvid_send_situ(hdev);
484 		}
485 
486 		par->update = msg->feature_chg.is_dirt_needed;
487 		if (par->update)
488 			schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
489 	}
490 }
491 
492 /* Receive callback for messages from the host */
493 static void synthvid_receive(void *ctx)
494 {
495 	struct hv_device *hdev = ctx;
496 	struct fb_info *info = hv_get_drvdata(hdev);
497 	struct hvfb_par *par;
498 	struct synthvid_msg *recv_buf;
499 	u32 bytes_recvd;
500 	u64 req_id;
501 	int ret;
502 
503 	if (!info)
504 		return;
505 
506 	par = info->par;
507 	recv_buf = (struct synthvid_msg *)par->recv_buf;
508 
509 	do {
510 		ret = vmbus_recvpacket(hdev->channel, recv_buf,
511 				       MAX_VMBUS_PKT_SIZE,
512 				       &bytes_recvd, &req_id);
513 		if (bytes_recvd > 0 &&
514 		    recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
515 			synthvid_recv_sub(hdev);
516 	} while (bytes_recvd > 0 && ret == 0);
517 }
518 
519 /* Check if version ver1 is greater than or equal to version ver2 */
520 static inline bool synthvid_ver_ge(u32 ver1, u32 ver2)
521 {
522 	if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
523 	    (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
524 	     SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
525 		return true;
526 
527 	return false;
528 }
529 
530 /* Check synthetic video protocol version with the host */
531 static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
532 {
533 	struct fb_info *info = hv_get_drvdata(hdev);
534 	struct hvfb_par *par = info->par;
535 	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
536 	int ret = 0;
537 	unsigned long t;
538 
539 	memset(msg, 0, sizeof(struct synthvid_msg));
540 	msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
541 	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
542 		sizeof(struct synthvid_version_req);
543 	msg->ver_req.version = ver;
544 	synthvid_send(hdev, msg);
545 
546 	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
547 	if (!t) {
548 		pr_err("Timed out waiting for version response\n");
549 		ret = -ETIMEDOUT;
550 		goto out;
551 	}
552 	if (!msg->ver_resp.is_accepted) {
553 		ret = -ENODEV;
554 		goto out;
555 	}
556 
557 	par->synthvid_version = ver;
558 	pr_info("Synthvid Version major %d, minor %d\n",
559 		SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
560 
561 out:
562 	return ret;
563 }
564 
565 /* Get the supported resolutions from the host and pick the default one */
566 static int synthvid_get_supported_resolution(struct hv_device *hdev)
567 {
568 	struct fb_info *info = hv_get_drvdata(hdev);
569 	struct hvfb_par *par = info->par;
570 	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
571 	int ret = 0;
572 	unsigned long t;
573 	u8 index;
574 
575 	memset(msg, 0, sizeof(struct synthvid_msg));
576 	msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
577 	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
578 		sizeof(struct synthvid_supported_resolution_req);
579 
580 	msg->resolution_req.maximum_resolution_count =
581 		SYNTHVID_MAX_RESOLUTION_COUNT;
582 	synthvid_send(hdev, msg);
583 
584 	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
585 	if (!t) {
586 		pr_err("Timed out waiting for resolution response\n");
587 		ret = -ETIMEDOUT;
588 		goto out;
589 	}
590 
591 	if (msg->resolution_resp.resolution_count == 0) {
592 		pr_err("No supported resolutions\n");
593 		ret = -ENODEV;
594 		goto out;
595 	}
596 
597 	index = msg->resolution_resp.default_resolution_index;
598 	if (index >= msg->resolution_resp.resolution_count) {
599 		pr_err("Invalid resolution index: %d\n", index);
600 		ret = -ENODEV;
601 		goto out;
602 	}
603 
604 	screen_width =
605 		msg->resolution_resp.supported_resolution[index].width;
606 	screen_height =
607 		msg->resolution_resp.supported_resolution[index].height;
608 
609 out:
610 	return ret;
611 }
612 
613 /* Connect to VSP (Virtual Service Provider) on host */
614 static int synthvid_connect_vsp(struct hv_device *hdev)
615 {
616 	struct fb_info *info = hv_get_drvdata(hdev);
617 	struct hvfb_par *par = info->par;
618 	int ret;
619 
620 	ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE,
621 			 NULL, 0, synthvid_receive, hdev);
622 	if (ret) {
623 		pr_err("Unable to open vmbus channel\n");
624 		return ret;
625 	}
626 
627 	/* Negotiate the protocol version with host */
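	/* Try the newest version for this host first; fall back if it is not accepted */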
628 	switch (vmbus_proto_version) {
629 	case VERSION_WIN10:
630 	case VERSION_WIN10_V5:
631 		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
632 		if (!ret)
633 			break;
634 		fallthrough;
635 	case VERSION_WIN8:
636 	case VERSION_WIN8_1:
637 		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
638 		break;
639 	default:
640 		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
641 		break;
642 	}
643 
644 	if (ret) {
645 		pr_err("Synthetic video device version not accepted\n");
646 		goto error;
647 	}
648 
649 	screen_depth = SYNTHVID_DEPTH_WIN8;
650 	if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
651 		ret = synthvid_get_supported_resolution(hdev);
652 		if (ret)
653 			pr_info("Failed to get supported resolution from host, using default\n");
654 	}
655 
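	/* The framebuffer (VRAM) size comes from the host's channel offer, in megabytes */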
656 	screen_fb_size = hdev->channel->offermsg.offer.
657 				mmio_megabytes * 1024 * 1024;
658 
659 	return 0;
660 
661 error:
662 	vmbus_close(hdev->channel);
663 	return ret;
664 }
665 
666 /* Send VRAM and Situation messages to the host */
667 static int synthvid_send_config(struct hv_device *hdev)
668 {
669 	struct fb_info *info = hv_get_drvdata(hdev);
670 	struct hvfb_par *par = info->par;
671 	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
672 	int ret = 0;
673 	unsigned long t;
674 
675 	/* Send VRAM location */
676 	memset(msg, 0, sizeof(struct synthvid_msg));
677 	msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
678 	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
679 		sizeof(struct synthvid_vram_location);
680 	msg->vram.user_ctx = msg->vram.vram_gpa = par->mmio_pp;
681 	msg->vram.is_vram_gpa_specified = 1;
682 	synthvid_send(hdev, msg);
683 
684 	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
685 	if (!t) {
686 		pr_err("Timed out waiting for VRAM location ack\n");
687 		ret = -ETIMEDOUT;
688 		goto out;
689 	}
690 	if (msg->vram_ack.user_ctx != par->mmio_pp) {
691 		pr_err("Unable to set VRAM location\n");
692 		ret = -ENODEV;
693 		goto out;
694 	}
695 
696 	/* Send pointer and situation update */
697 	synthvid_send_ptr(hdev);
698 	synthvid_send_situ(hdev);
699 
700 out:
701 	return ret;
702 }
703 
704 
705 /*
706  * Delayed work callback:
707  * It is scheduled to run whenever an update request is received and it
708  * has not run within the last HVFB_ONDEMAND_THROTTLE interval.
709  */
710 static void hvfb_update_work(struct work_struct *w)
711 {
712 	struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work);
713 	struct fb_info *info = par->info;
714 	unsigned long flags;
715 	int x1, x2, y1, y2;
716 	int j;
717 
718 	spin_lock_irqsave(&par->delayed_refresh_lock, flags);
719 	/* Reset the request flag */
720 	par->delayed_refresh = false;
721 
722 	/* Store the dirty rectangle to local variables */
723 	x1 = par->x1;
724 	x2 = par->x2;
725 	y1 = par->y1;
726 	y2 = par->y2;
727 
728 	/* Clear dirty rectangle */
729 	par->x1 = par->y1 = INT_MAX;
730 	par->x2 = par->y2 = 0;
731 
732 	spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
733 
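	/* Nothing to do if the dirty rectangle is empty or out of bounds */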
734 	if (x1 > info->var.xres || x2 > info->var.xres ||
735 	    y1 > info->var.yres || y2 > info->var.yres || x2 <= x1)
736 		return;
737 
738 	/* Copy the dirty rectangle to frame buffer memory */
739 	if (par->need_docopy)
740 		for (j = y1; j < y2; j++)
741 			hvfb_docopy(par,
742 				    j * info->fix.line_length +
743 				    (x1 * screen_depth / 8),
744 				    (x2 - x1) * screen_depth / 8);
745 
746 	/* Refresh */
747 	if (par->fb_ready && par->update)
748 		synthvid_update(info, x1, y1, x2, y2);
749 }
750 
751 /*
752  * Control the on-demand refresh frequency. It schedules a delayed
753  * screen update if one is not already pending.
754  */
755 static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
756 					   int x1, int y1, int w, int h)
757 {
758 	unsigned long flags;
759 	int x2 = x1 + w;
760 	int y2 = y1 + h;
761 
762 	spin_lock_irqsave(&par->delayed_refresh_lock, flags);
763 
764 	/* Merge dirty rectangle */
765 	par->x1 = min_t(int, par->x1, x1);
766 	par->y1 = min_t(int, par->y1, y1);
767 	par->x2 = max_t(int, par->x2, x2);
768 	par->y2 = max_t(int, par->y2, y2);
769 
770 	/* Schedule a delayed screen update if not yet */
771 	if (par->delayed_refresh == false) {
772 		schedule_delayed_work(&par->dwork,
773 				      HVFB_ONDEMAND_THROTTLE);
774 		par->delayed_refresh = true;
775 	}
776 
777 	spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
778 }
779 
780 static int hvfb_on_panic(struct notifier_block *nb,
781 			 unsigned long e, void *p)
782 {
783 	struct hv_device *hdev;
784 	struct hvfb_par *par;
785 	struct fb_info *info;
786 
787 	par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
788 	info = par->info;
789 	hdev = device_to_hv_device(info->device);
790 
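	/*
	 * If the channel's ring buffer spinlock is busy, sending from panic
	 * context could deadlock, so skip the final screen update.
	 */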
791 	if (hv_ringbuffer_spinlock_busy(hdev->channel))
792 		return NOTIFY_DONE;
793 
794 	par->synchronous_fb = true;
795 	if (par->need_docopy)
796 		hvfb_docopy(par, 0, dio_fb_size);
797 	synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
798 
799 	return NOTIFY_DONE;
800 }
801 
802 /* Framebuffer operation handlers */
803 
804 static int hvfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
805 {
806 	if (var->xres < HVFB_WIDTH_MIN || var->yres < HVFB_HEIGHT_MIN ||
807 	    var->xres > screen_width || var->yres >  screen_height ||
808 	    var->bits_per_pixel != screen_depth)
809 		return -EINVAL;
810 
811 	var->xres_virtual = var->xres;
812 	var->yres_virtual = var->yres;
813 
814 	return 0;
815 }
816 
817 static int hvfb_set_par(struct fb_info *info)
818 {
819 	struct hv_device *hdev = device_to_hv_device(info->device);
820 
821 	return synthvid_send_situ(hdev);
822 }
823 
824 
825 static inline u32 chan_to_field(u32 chan, struct fb_bitfield *bf)
826 {
827 	return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
828 }
829 
830 static int hvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
831 			  unsigned blue, unsigned transp, struct fb_info *info)
832 {
833 	u32 *pal = info->pseudo_palette;
834 
835 	if (regno > 15)
836 		return -EINVAL;
837 
838 	pal[regno] = chan_to_field(red, &info->var.red)
839 		| chan_to_field(green, &info->var.green)
840 		| chan_to_field(blue, &info->var.blue)
841 		| chan_to_field(transp, &info->var.transp);
842 
843 	return 0;
844 }
845 
846 static int hvfb_blank(int blank, struct fb_info *info)
847 {
848 	return 1;	/* get fb_blank to set the colormap to all black */
849 }
850 
851 static void hvfb_ops_damage_range(struct fb_info *info, off_t off, size_t len)
852 {
853 	/* TODO: implement damage handling */
854 }
855 
856 static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height)
857 {
858 	struct hvfb_par *par = info->par;
859 
860 	if (par->synchronous_fb)
861 		synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
862 	else
863 		hvfb_ondemand_refresh_throttle(par, x, y, width, height);
864 }
865 
866 /*
867  * TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the
868  *       driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases.
869  */
870 FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(hvfb_ops,
871 				  hvfb_ops_damage_range,
872 				  hvfb_ops_damage_area)
873 
874 static const struct fb_ops hvfb_ops = {
875 	.owner = THIS_MODULE,
876 	FB_DEFAULT_DEFERRED_OPS(hvfb_ops),
877 	.fb_check_var = hvfb_check_var,
878 	.fb_set_par = hvfb_set_par,
879 	.fb_setcolreg = hvfb_setcolreg,
880 	.fb_blank = hvfb_blank,
881 };
882 
883 /* Get options from the kernel parameter "video=" */
884 static void hvfb_get_option(struct fb_info *info)
885 {
886 	struct hvfb_par *par = info->par;
887 	char *opt = NULL, *p;
888 	uint x = 0, y = 0;
889 
890 	if (fb_get_options(KBUILD_MODNAME, &opt) || !opt || !*opt)
891 		return;
892 
893 	p = strsep(&opt, "x");
894 	if (!*p || kstrtouint(p, 0, &x) ||
895 	    !opt || !*opt || kstrtouint(opt, 0, &y)) {
896 		pr_err("Screen option is invalid: skipped\n");
897 		return;
898 	}
899 
900 	if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
901 	    (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
902 	    (x * y * screen_depth / 8 > screen_fb_size)) ||
903 	    (par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
904 	     x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8)) {
905 		pr_err("Screen resolution option is out of range: skipped\n");
906 		return;
907 	}
908 
909 	screen_width = x;
910 	screen_height = y;
911 	return;
912 }
913 
914 /*
915  * Allocate enough contiguous physical memory.
916  * Return the physical address on success, or -1 on failure.
917  */
918 static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
919 				   unsigned int request_size)
920 {
921 	struct page *page = NULL;
922 	dma_addr_t dma_handle;
923 	void *vmem;
924 	phys_addr_t paddr = 0;
925 	unsigned int order = get_order(request_size);
926 
927 	if (request_size == 0)
928 		return -1;
929 
930 	if (order <= MAX_ORDER) {
931 		/* Call alloc_pages if the size is at most 2^MAX_ORDER pages */
932 		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
933 		if (!page)
934 			return -1;
935 
936 		paddr = (page_to_pfn(page) << PAGE_SHIFT);
937 	} else {
938 		/* Allocate from CMA */
939 		hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);
940 
941 		vmem = dma_alloc_coherent(&hdev->device,
942 					  round_up(request_size, PAGE_SIZE),
943 					  &dma_handle,
944 					  GFP_KERNEL | __GFP_NOWARN);
945 
946 		if (!vmem)
947 			return -1;
948 
949 		paddr = virt_to_phys(vmem);
950 	}
951 
952 	return paddr;
953 }
954 
955 /* Release contiguous physical memory */
956 static void hvfb_release_phymem(struct hv_device *hdev,
957 				phys_addr_t paddr, unsigned int size)
958 {
959 	unsigned int order = get_order(size);
960 
961 	if (order <= MAX_ORDER)
962 		__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
963 	else
964 		dma_free_coherent(&hdev->device,
965 				  round_up(size, PAGE_SIZE),
966 				  phys_to_virt(paddr),
967 				  paddr);
968 }
969 
970 
971 /* Get framebuffer memory: VM physical memory (Gen 1) or Hyper-V video MMIO space */
972 static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
973 {
974 	struct hvfb_par *par = info->par;
975 	struct pci_dev *pdev  = NULL;
976 	void __iomem *fb_virt;
977 	int gen2vm = efi_enabled(EFI_BOOT);
978 	resource_size_t base, size;
979 	phys_addr_t paddr;
980 	int ret;
981 
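	/* Gen 2 VMs boot via EFI; only Gen 1 VMs expose the Hyper-V video device on PCI */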
982 	if (!gen2vm) {
983 		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
984 			PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
985 		if (!pdev) {
986 			pr_err("Unable to find PCI Hyper-V video\n");
987 			return -ENODEV;
988 		}
989 
990 		base = pci_resource_start(pdev, 0);
991 		size = pci_resource_len(pdev, 0);
992 
993 		/*
994 		 * For a Gen 1 VM, we can use the VM's contiguous physical
995 		 * memory directly. If that succeeds, deferred IO happens
996 		 * directly on this allocated framebuffer memory, avoiding
997 		 * an extra memory copy.
998 		 */
999 		paddr = hvfb_get_phymem(hdev, screen_fb_size);
1000 		if (paddr != (phys_addr_t) -1) {
1001 			par->mmio_pp = paddr;
1002 			par->mmio_vp = par->dio_vp = __va(paddr);
1003 
1004 			info->fix.smem_start = paddr;
1005 			info->fix.smem_len = screen_fb_size;
1006 			info->screen_base = par->mmio_vp;
1007 			info->screen_size = screen_fb_size;
1008 
1009 			par->need_docopy = false;
1010 			goto getmem_done;
1011 		}
1012 		pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
1013 	} else if (IS_ENABLED(CONFIG_SYSFB)) {
1014 		base = screen_info.lfb_base;
1015 		size = screen_info.lfb_size;
1016 	} else {
1017 		goto err1;
1018 	}
1019 
1020 	/*
1021 	 * Cannot use the contiguous physical memory.
1022 	 * Allocate mmio space for framebuffer.
1023 	 */
1024 	dio_fb_size =
1025 		screen_width * screen_height * screen_depth / 8;
1026 
1027 	ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1,
1028 				  screen_fb_size, 0x100000, true);
1029 	if (ret != 0) {
1030 		pr_err("Unable to allocate framebuffer memory\n");
1031 		goto err1;
1032 	}
1033 
1034 	/*
1035 	 * Map the VRAM cacheable for performance. This is also required for
1036 	 * VM Connect to display properly for ARM64 Linux VM, as the host also
1037 	 * maps the VRAM cacheable.
1038 	 */
1039 	fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
1040 	if (!fb_virt)
1041 		goto err2;
1042 
1043 	/* Allocate memory for deferred IO */
1044 	par->dio_vp = vzalloc(round_up(dio_fb_size, PAGE_SIZE));
1045 	if (par->dio_vp == NULL)
1046 		goto err3;
1047 
1048 	/* Physical address of FB device */
1049 	par->mmio_pp = par->mem->start;
1050 	/* Virtual address of FB device */
1051 	par->mmio_vp = (unsigned char *) fb_virt;
1052 
1053 	info->fix.smem_start = par->mem->start;
1054 	info->fix.smem_len = dio_fb_size;
1055 	info->screen_base = par->dio_vp;
1056 	info->screen_size = dio_fb_size;
1057 
1058 getmem_done:
1059 	aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME);
1060 
1061 	if (!gen2vm) {
1062 		pci_dev_put(pdev);
1063 	} else if (IS_ENABLED(CONFIG_SYSFB)) {
1064 		/* The framebuffer was reallocated; clear screen_info to avoid misuse by kexec */
1065 		screen_info.lfb_size = 0;
1066 		screen_info.lfb_base = 0;
1067 		screen_info.orig_video_isVGA = 0;
1068 	}
1069 
1070 	return 0;
1071 
1072 err3:
1073 	iounmap(fb_virt);
1074 err2:
1075 	vmbus_free_mmio(par->mem->start, screen_fb_size);
1076 	par->mem = NULL;
1077 err1:
1078 	if (!gen2vm)
1079 		pci_dev_put(pdev);
1080 
1081 	return -ENOMEM;
1082 }
1083 
1084 /* Release the framebuffer */
1085 static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
1086 {
1087 	struct hvfb_par *par = info->par;
1088 
1089 	if (par->need_docopy) {
1090 		vfree(par->dio_vp);
1091 		iounmap(info->screen_base);
1092 		vmbus_free_mmio(par->mem->start, screen_fb_size);
1093 	} else {
1094 		hvfb_release_phymem(hdev, info->fix.smem_start,
1095 				    screen_fb_size);
1096 	}
1097 
1098 	par->mem = NULL;
1099 }
1100 
1101 
1102 static int hvfb_probe(struct hv_device *hdev,
1103 		      const struct hv_vmbus_device_id *dev_id)
1104 {
1105 	struct fb_info *info;
1106 	struct hvfb_par *par;
1107 	int ret;
1108 
1109 	info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device);
1110 	if (!info)
1111 		return -ENOMEM;
1112 
1113 	par = info->par;
1114 	par->info = info;
1115 	par->fb_ready = false;
1116 	par->need_docopy = true;
1117 	init_completion(&par->wait);
1118 	INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
1119 
1120 	par->delayed_refresh = false;
1121 	spin_lock_init(&par->delayed_refresh_lock);
1122 	par->x1 = par->y1 = INT_MAX;
1123 	par->x2 = par->y2 = 0;
1124 
1125 	/* Connect to VSP */
1126 	hv_set_drvdata(hdev, info);
1127 	ret = synthvid_connect_vsp(hdev);
1128 	if (ret) {
1129 		pr_err("Unable to connect to VSP\n");
1130 		goto error1;
1131 	}
1132 
1133 	hvfb_get_option(info);
1134 	pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
1135 		screen_width, screen_height, screen_depth, screen_fb_size);
1136 
1137 	ret = hvfb_getmem(hdev, info);
1138 	if (ret) {
1139 		pr_err("No memory for framebuffer\n");
1140 		goto error2;
1141 	}
1142 
1143 	/* Set up fb_info */
1144 	info->var.xres_virtual = info->var.xres = screen_width;
1145 	info->var.yres_virtual = info->var.yres = screen_height;
1146 	info->var.bits_per_pixel = screen_depth;
1147 
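	/* 16 bpp is laid out as RGB565; anything else is treated as 32 bpp ARGB8888 */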
1148 	if (info->var.bits_per_pixel == 16) {
1149 		info->var.red = (struct fb_bitfield){11, 5, 0};
1150 		info->var.green = (struct fb_bitfield){5, 6, 0};
1151 		info->var.blue = (struct fb_bitfield){0, 5, 0};
1152 		info->var.transp = (struct fb_bitfield){0, 0, 0};
1153 	} else {
1154 		info->var.red = (struct fb_bitfield){16, 8, 0};
1155 		info->var.green = (struct fb_bitfield){8, 8, 0};
1156 		info->var.blue = (struct fb_bitfield){0, 8, 0};
1157 		info->var.transp = (struct fb_bitfield){24, 8, 0};
1158 	}
1159 
1160 	info->var.activate = FB_ACTIVATE_NOW;
1161 	info->var.height = -1;
1162 	info->var.width = -1;
1163 	info->var.vmode = FB_VMODE_NONINTERLACED;
1164 
1165 	strcpy(info->fix.id, KBUILD_MODNAME);
1166 	info->fix.type = FB_TYPE_PACKED_PIXELS;
1167 	info->fix.visual = FB_VISUAL_TRUECOLOR;
1168 	info->fix.line_length = screen_width * screen_depth / 8;
1169 	info->fix.accel = FB_ACCEL_NONE;
1170 
1171 	info->fbops = &hvfb_ops;
1172 	info->pseudo_palette = par->pseudo_palette;
1173 
1174 	/* Initialize deferred IO */
1175 	info->fbdefio = &synthvid_defio;
1176 	fb_deferred_io_init(info);
1177 
1178 	/* Send config to host */
1179 	ret = synthvid_send_config(hdev);
1180 	if (ret)
1181 		goto error;
1182 
1183 	ret = register_framebuffer(info);
1184 	if (ret) {
1185 		pr_err("Unable to register framebuffer\n");
1186 		goto error;
1187 	}
1188 
1189 	par->fb_ready = true;
1190 
1191 	par->synchronous_fb = false;
1192 
1193 	/*
1194 	 * We need to be sure this panic notifier runs _before_ the
1195 	 * vmbus disconnect, so order it by priority. It must execute
1196 	 * before the function hv_panic_vmbus_unload() [drivers/hv/vmbus_drv.c],
1197 	 * which is almost at the end of list, with priority = INT_MIN + 1.
1198 	 */
1199 	par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
1200 	par->hvfb_panic_nb.priority = INT_MIN + 10;
1201 	atomic_notifier_chain_register(&panic_notifier_list,
1202 				       &par->hvfb_panic_nb);
1203 
1204 	return 0;
1205 
1206 error:
1207 	fb_deferred_io_cleanup(info);
1208 	hvfb_putmem(hdev, info);
1209 error2:
1210 	vmbus_close(hdev->channel);
1211 error1:
1212 	cancel_delayed_work_sync(&par->dwork);
1213 	hv_set_drvdata(hdev, NULL);
1214 	framebuffer_release(info);
1215 	return ret;
1216 }
1217 
1218 static void hvfb_remove(struct hv_device *hdev)
1219 {
1220 	struct fb_info *info = hv_get_drvdata(hdev);
1221 	struct hvfb_par *par = info->par;
1222 
1223 	atomic_notifier_chain_unregister(&panic_notifier_list,
1224 					 &par->hvfb_panic_nb);
1225 
1226 	par->update = false;
1227 	par->fb_ready = false;
1228 
1229 	fb_deferred_io_cleanup(info);
1230 
1231 	unregister_framebuffer(info);
1232 	cancel_delayed_work_sync(&par->dwork);
1233 
1234 	vmbus_close(hdev->channel);
1235 	hv_set_drvdata(hdev, NULL);
1236 
1237 	hvfb_putmem(hdev, info);
1238 	framebuffer_release(info);
1239 }
1240 
1241 static int hvfb_suspend(struct hv_device *hdev)
1242 {
1243 	struct fb_info *info = hv_get_drvdata(hdev);
1244 	struct hvfb_par *par = info->par;
1245 
1246 	console_lock();
1247 
1248 	/* 1 means do suspend */
1249 	fb_set_suspend(info, 1);
1250 
1251 	cancel_delayed_work_sync(&par->dwork);
1252 	cancel_delayed_work_sync(&info->deferred_work);
1253 
1254 	par->update_saved = par->update;
1255 	par->update = false;
1256 	par->fb_ready = false;
1257 
1258 	vmbus_close(hdev->channel);
1259 
1260 	console_unlock();
1261 
1262 	return 0;
1263 }
1264 
1265 static int hvfb_resume(struct hv_device *hdev)
1266 {
1267 	struct fb_info *info = hv_get_drvdata(hdev);
1268 	struct hvfb_par *par = info->par;
1269 	int ret;
1270 
1271 	console_lock();
1272 
1273 	ret = synthvid_connect_vsp(hdev);
1274 	if (ret != 0)
1275 		goto out;
1276 
1277 	ret = synthvid_send_config(hdev);
1278 	if (ret != 0) {
1279 		vmbus_close(hdev->channel);
1280 		goto out;
1281 	}
1282 
1283 	par->fb_ready = true;
1284 	par->update = par->update_saved;
1285 
1286 	schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
1287 	schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
1288 
1289 	/* 0 means do resume */
1290 	fb_set_suspend(info, 0);
1291 
1292 out:
1293 	console_unlock();
1294 
1295 	return ret;
1296 }
1297 
1298 
1299 static const struct pci_device_id pci_stub_id_table[] = {
1300 	{
1301 		.vendor      = PCI_VENDOR_ID_MICROSOFT,
1302 		.device      = PCI_DEVICE_ID_HYPERV_VIDEO,
1303 	},
1304 	{ /* end of list */ }
1305 };
1306 
1307 static const struct hv_vmbus_device_id id_table[] = {
1308 	/* Synthetic Video Device GUID */
1309 	{HV_SYNTHVID_GUID},
1310 	{}
1311 };
1312 
1313 MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
1314 MODULE_DEVICE_TABLE(vmbus, id_table);
1315 
1316 static struct hv_driver hvfb_drv = {
1317 	.name = KBUILD_MODNAME,
1318 	.id_table = id_table,
1319 	.probe = hvfb_probe,
1320 	.remove = hvfb_remove,
1321 	.suspend = hvfb_suspend,
1322 	.resume = hvfb_resume,
1323 	.driver = {
1324 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1325 	},
1326 };
1327 
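/*
 * Stub PCI driver: it only claims the Hyper-V video PCI device present on
 * Gen 1 VMs; all of the real work is done through the VMBus device above.
 */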
1328 static int hvfb_pci_stub_probe(struct pci_dev *pdev,
1329 			       const struct pci_device_id *ent)
1330 {
1331 	return 0;
1332 }
1333 
1334 static void hvfb_pci_stub_remove(struct pci_dev *pdev)
1335 {
1336 }
1337 
1338 static struct pci_driver hvfb_pci_stub_driver = {
1339 	.name =		KBUILD_MODNAME,
1340 	.id_table =	pci_stub_id_table,
1341 	.probe =	hvfb_pci_stub_probe,
1342 	.remove =	hvfb_pci_stub_remove,
1343 	.driver = {
1344 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1345 	}
1346 };
1347 
1348 static int __init hvfb_drv_init(void)
1349 {
1350 	int ret;
1351 
1352 	if (fb_modesetting_disabled("hyperv_fb"))
1353 		return -ENODEV;
1354 
1355 	ret = vmbus_driver_register(&hvfb_drv);
1356 	if (ret != 0)
1357 		return ret;
1358 
1359 	ret = pci_register_driver(&hvfb_pci_stub_driver);
1360 	if (ret != 0) {
1361 		vmbus_driver_unregister(&hvfb_drv);
1362 		return ret;
1363 	}
1364 
1365 	return 0;
1366 }
1367 
1368 static void __exit hvfb_drv_exit(void)
1369 {
1370 	pci_unregister_driver(&hvfb_pci_stub_driver);
1371 	vmbus_driver_unregister(&hvfb_drv);
1372 }
1373 
1374 module_init(hvfb_drv_init);
1375 module_exit(hvfb_drv_exit);
1376 
1377 MODULE_LICENSE("GPL");
1378 MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");
1379