/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <hyperv/hvhdk.h>

#define MAX_PAGE_BUFFER_COUNT				32
#define MAX_MULTIPAGE_BUFFER_COUNT			32 /* 128K */

#pragma pack(push, 1)

/*
 * Types for GPADL; the type decides how the GPADL header is created.
 *
 * There is not much difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the header of a ring buffer
 * will be PAGE_SIZE bytes, but only its first HV_HYP_PAGE is put into the
 * GPADL. Therefore the number of HV_HYP_PAGEs and the index of each
 * HV_HYP_PAGE differ between the types of GPADL; for example, if PAGE_SIZE
 * is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:  0    1    2     15   16   17   18 .. 31   32 ...
 *         |    |    ...   |    |    |   ...    |   ...
 *         v    V          V    V    V          V
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header  |           data           | header  |     data      |
 * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:  0    1    16   17   18    31   ...   n   n+1  n+16 ...         2n
 *         |         /    /          /          |         /               /
 *         |        /    /          /           |        /               /
 *         |       /    /   ...    /    ...     |       /      ...      /
 *         |      /    /          /             |      /               /
 *         |     /    /          /              |     /               /
 *         V    V    V          V               V    V               v
 * gpadl:  | 4k | 4k |   ...    |    ...        | 4k | 4k |  ...     |
 * index:  0    1    2   ...    16   ...       n-15 n-14 n-13  ...  2n-30
 */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};
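
/*
 * For example (illustrative, following the 64K diagrams above): a RING
 * GPADL covering one 64K header plus one 64K of data contributes
 * 1 + 16 HV_HYP_PAGEs, since only the first 4K page of the header is
 * mapped, whereas a BUFFER GPADL over the same 128K of guest memory
 * contributes all 32.
 */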

/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};
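
/*
 * For example (illustrative): a 10000-byte buffer whose data starts at
 * offset 2048 within its first Hyper-V page spans
 * HVPFN_UP(2048 + 10000) = 3 Hyper-V pages, so pfn_array needs three
 * entries (HVPFN_UP is defined at the end of this header).
 */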

/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))


#pragma pack(pop)

struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers.  If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on a page boundary */
	u8	reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;

/*
 * If the requested ring buffer size is at least 8 times the size of the
 * header, steal space from the ring buffer for the header. Otherwise, add
 * space for the header so that it doesn't take too much of the ring buffer
 * space.
 *
 * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
 * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
 * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
 * large allocation that will be almost half wasted. As a contrasting example,
 * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
 * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
 * In this latter case, we must add 64 Kbytes for the header and not worry
 * about what's wasted.
 */
#define VMBUS_HEADER_ADJ(payload_sz) \
	((payload_sz) >=  8 * sizeof(struct hv_ring_buffer) ? \
	0 : sizeof(struct hv_ring_buffer))

/* Calculate the proper size of a ring buffer; it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
					       (payload_sz))
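
/*
 * For example (illustrative): with 4 Kbyte pages,
 * VMBUS_RING_SIZE(16 * 1024) is PAGE_ALIGN(4096 + 16384) = 20480 bytes,
 * because the payload is less than 8 * sizeof(struct hv_ring_buffer) and
 * the header is added on top. With 64 Kbyte pages,
 * VMBUS_RING_SIZE(512 * 1024) is 512 Kbytes, because the payload is at
 * least 8 times the header size and the header is stolen from it.
 */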

struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;

	/* Buffer that holds a copy of an incoming host packet */
	void *pkt_buffer;
	u32 pkt_buffer_size;
};

static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}

static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;
	return write;
}

static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

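	/*
	 * (avail_write << 3) + (avail_write << 1) is avail_write * 10.
	 * Dividing that by ring_size / 10 yields the free space as a
	 * percentage of the total ring size, without a runtime division.
	 */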
	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}

/*
 * The VMBus version is a 32-bit entity broken up into
 * two 16-bit quantities: major_number.minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7, WS2008 R2)
 * 2 . 4  (Windows 8, WS2012)
 * 3 . 0  (Windows 8.1, WS2012 R2)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 * 5 . 3  (Windows Server 2022)
 *
 * The WS2008 and WIN7 versions are listed here for
 * completeness but are no longer supported in the
 * Linux kernel.
 */

#define VMBUS_MAKE_VERSION(MAJ, MIN)	((((u32)MAJ) << 16) | (MIN))
#define VERSION_WS2008					VMBUS_MAKE_VERSION(0, 13)
#define VERSION_WIN7					VMBUS_MAKE_VERSION(1, 1)
#define VERSION_WIN8					VMBUS_MAKE_VERSION(2, 4)
#define VERSION_WIN8_1					VMBUS_MAKE_VERSION(3, 0)
#define VERSION_WIN10					VMBUS_MAKE_VERSION(4, 0)
#define VERSION_WIN10_V4_1				VMBUS_MAKE_VERSION(4, 1)
#define VERSION_WIN10_V5				VMBUS_MAKE_VERSION(5, 0)
#define VERSION_WIN10_V5_1				VMBUS_MAKE_VERSION(5, 1)
#define VERSION_WIN10_V5_2				VMBUS_MAKE_VERSION(5, 2)
#define VERSION_WIN10_V5_3				VMBUS_MAKE_VERSION(5, 3)
#define VERSION_WIN10_V6_0				VMBUS_MAKE_VERSION(6, 0)
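
/* For example, VERSION_WIN10_V5_2 encodes as (5 << 16) | 2 == 0x00050002. */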

/* Make the maximum size of a pipe payload 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116

/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;		/* in megabytes (value * 1024 * 1024 bytes) */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol, which
		 * is implemented on top of standard user-defined data. Pipe
		 * clients have MAX_PIPE_USER_DEFINED_BYTES left for their own
		 * use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;
	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;

/* Server Flags */
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE		0x0001
/*
 * This flag indicates that the channel is offered by the paravisor, and must
 * use encrypted memory for the channel ring buffer.
 */
#define VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER			0x0002
/*
 * This flag indicates that the channel is offered by the paravisor, and must
 * use encrypted memory for GPA direct packets and additional GPADLs.
 */
#define VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY		0x0004
#define VMBUS_CHANNEL_NAMED_PIPE_MODE					0x0010
#define VMBUS_CHANNEL_LOOPBACK_OFFER					0x0100
#define VMBUS_CHANNEL_PARENT_OFFER						0x0200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x0400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER				0x2000

struct vmpacket_descriptor {
	u16 type;
	u16 offset8;
	u16 len8;
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8 reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[];
} __packed;

/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)

enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1


/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID			=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS		=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT		=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY			=  9,
	CHANNELMSG_GPADL_CREATED		= 10,
	CHANNELMSG_GPADL_TEARDOWN		= 11,
	CHANNELMSG_GPADL_TORNDOWN		= 12,
	CHANNELMSG_RELID_RELEASED		= 13,
	CHANNELMSG_INITIATE_CONTACT		= 14,
	CHANNELMSG_VERSION_RESPONSE		= 15,
	CHANNELMSG_UNLOAD			= 16,
	CHANNELMSG_UNLOAD_RESPONSE		= 17,
	CHANNELMSG_18				= 18,
	CHANNELMSG_19				= 19,
	CHANNELMSG_20				= 20,
	CHANNELMSG_TL_CONNECT_REQUEST		= 21,
	CHANNELMSG_MODIFYCHANNEL		= 22,
	CHANNELMSG_TL_CONNECT_RESULT		= 23,
	CHANNELMSG_MODIFYCHANNEL_RESPONSE	= 24,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX

struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond split this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *		           ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */

/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;

/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Modify Channel Result parameters */
struct vmbus_channel_modifychannel_response {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8

/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset.  If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/*
 * Used by the paravisor only; indicates that encrypted ring buffers and
 * encrypted external memory are supported.
 */
#define VMBUS_FEATURE_FLAG_CONFIDENTIAL_CHANNELS	0x10

struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	msg_vtl;
			u8	reserved[2];
			u32 feature_flags; /* VMBus version 6.0 */
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;

/* Hyper-V socket: the guest connect()-ing to the host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;

struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;

enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure, depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion  waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
		struct vmbus_channel_modifychannel_response modify_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[];
};

enum vmbus_device_type {
	HV_IDE = 0,
	HV_SCSI,
	HV_FC,
	HV_NIC,
	HV_ND,
	HV_PCIE,
	HV_FB,
	HV_KBD,
	HV_MOUSE,
	HV_KVP,
	HV_TS,
	HV_HB,
	HV_SHUTDOWN,
	HV_FCOPY,
	HV_BACKUP,
	HV_DM,
	HV_UNKNOWN,
};

/*
 * Provides request ids for VMBus. Encapsulates guest memory
 * addresses and stores the next available slot in req_arr
 * to generate new ids in constant time.
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR U64_MAX
#define VMBUS_RQST_ERROR (U64_MAX - 1)
#define VMBUS_RQST_ADDR_ANY U64_MAX
/* NetVSC-specific */
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
/* StorVSC-specific */
#define VMBUS_RQST_INIT (U64_MAX - 2)
#define VMBUS_RQST_RESET (U64_MAX - 3)

struct vmbus_device {
	/* preferred ring buffer size in KB, 0 means no preferred size for this device */
	size_t pref_ring_size;
	u16  dev_type;
	guid_t guid;
	bool perf_device;
	bool allowed_in_isolated;
};

#define VMBUS_DEFAULT_MAX_PKT_SIZE 4096

struct vmbus_gpadl {
	u32 gpadl_handle;
	u32 size;
	void *buffer;
	bool decrypted;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	bool rescind_ref; /* got rescind msg, got channel reference */
	struct completion rescind_event;

	struct vmbus_gpadl ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_channel_close_channel close_msg;

	/* Statistics */
	u64	interrupts;	/* Host to Guest interrupts */
	u64	sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* The channel callback is invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;

	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *            channel until empty. Interrupts from the host
	 *            are masked while read is in progress (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;

	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 *
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on CPU 0. Setting this value to 0 would preserve
	 * the earlier behavior.
	 */
	u32 target_cpu;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones) need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);

	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;
	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;
	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all CPUs have
	 * gone through a grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject			kobj;

	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    it will potentially process more packets. The monitor
	 *    latency allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency, adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;

	bool probe_done;

	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;

	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false) */
	bool fuzz_testing_state;

	/*
	 * The interrupt delay defers the guest's emptying of the ring buffer
	 * for a specific amount of time. The delay is in microseconds, between
	 * 1 and a maximum of 1000; the default is 0 (no delay).
	 * The message delay defers guest reading on a per-message basis, in
	 * microseconds between 1 and 1000, with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* callback to generate a request ID from a request address */
	u64 (*next_request_id_callback)(struct vmbus_channel *channel, u64 rqst_addr);
	/* callback to retrieve a request address from a request ID */
	u64 (*request_addr_callback)(struct vmbus_channel *channel, u64 rqst_id);

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;

	/* The max size of a packet on this channel */
	u32 max_pkt_size;

	/* function to mmap ring buffer memory to the channel's sysfs ring attribute */
	int (*mmap_ring_buffer)(struct vmbus_channel *channel, struct vm_area_struct *vma);

	/* boolean to control visibility of sysfs for ring buffer */
	bool ring_sysfs_visible;
	/* The ring buffer is encrypted */
	bool co_ring_buffer;
	/* The external memory is encrypted */
	bool co_external_memory;
};

#define lock_requestor(channel, flags)					\
do {									\
	struct vmbus_requestor *rqstor = &(channel)->requestor;		\
									\
	spin_lock_irqsave(&rqstor->req_lock, flags);			\
} while (0)

static __always_inline void unlock_requestor(struct vmbus_channel *channel,
					     unsigned long flags)
{
	struct vmbus_requestor *rqstor = &channel->requestor;

	spin_unlock_irqrestore(&rqstor->req_lock, flags);
}

u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr);
u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
			       u64 rqst_addr);
u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
			     u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
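
/*
 * Requestor usage sketch (illustrative; assumes the driver set
 * channel->rqstor_size before opening the channel). The send path maps
 * a driver request address to a compact id that travels as trans_id,
 * and the completion path maps it back:
 *
 *	u64 rqst_id = vmbus_next_request_id(channel, (u64)my_request);
 *	(the packet is sent with trans_id == rqst_id; on completion:)
 *	u64 addr = vmbus_request_addr(channel, desc->trans_id);
 *
 * "my_request" and "desc" are hypothetical driver-side names.
 */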

static inline bool is_co_ring_buffer(const struct vmbus_channel_offer_channel *o)
{
	return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER);
}

static inline bool is_co_external_memory(const struct vmbus_channel_offer_channel *o)
{
	return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY);
}

static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
{
	return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return is_hvsock_offer(&c->offermsg);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}

static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
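
/*
 * Illustrative flow-control sketch (assumes the host advertised
 * feat_pending_send_sz on this ring): a sender that finds too little
 * room in the outbound ring can record how much it needs, so the host
 * signals it once that much space has been freed, and retry later:
 *
 *	if (hv_get_bytes_to_write(&channel->outbound) < needed) {
 *		set_channel_pending_send_size(channel, needed);
 *		return -EAGAIN;
 *	}
 *
 * "needed" is a hypothetical driver-computed byte count.
 */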

void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);

/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;         /* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;

int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);

extern int vmbus_open(struct vmbus_channel *channel,
			    u32 send_ringbuffersize,
			    u32 recv_ringbuffersize,
			    void *userdata,
			    u32 userdatalen,
			    void (*onchannel_callback)(void *context),
			    void *context);

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket_getid(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  u64 *trans_id,
				  enum vmbus_packet_type type,
				  u32 flags);
extern int vmbus_sendpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferLen,
				  u64 requestid,
				  enum vmbus_packet_type type,
				  u32 flags);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				      void *kbuffer,
				      u32 size,
				      struct vmbus_gpadl *gpadl);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				     struct vmbus_gpadl *gpadl);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
				  void *buffer,
				  u32 bufferlen,
				  u32 *buffer_actual_len,
				  u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				     void *buffer,
				     u32 bufferlen,
				     u32 *buffer_actual_len,
				     u64 *requestid);
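
/*
 * Minimal I/O sketch (illustrative only; packet layout and error
 * handling are driver specific). "req", "buf" and the sizes are
 * hypothetical:
 *
 *	ret = vmbus_sendpacket(channel, &req, sizeof(req), rqst_id,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 *	ret = vmbus_recvpacket(channel, buf, buflen, &actual_len, &rqst_id);
 */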

/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct  {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	void (*remove)(struct hv_device *dev);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);

};

/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	/*
	 * Driver name to force a match.  Do not set directly, because core
	 * frees it.  Use driver_set_override() to set or clear it.
	 */
	const char *driver_override;

	struct vmbus_channel *channel;
	struct kset	     *channels_kset;
	struct device_dma_parameters dma_parms;
	u64 dma_mask;

	/* placeholder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;

};


#define device_to_hv_device(d)	container_of_const(d, struct hv_device, device)
#define drv_to_hv_drv(d)	container_of_const(d, struct hv_driver, driver)

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}

struct device *hv_get_vmbus_root_device(void);

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};


int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);

/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
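
/*
 * Registration sketch (illustrative): a VMBus driver matches offers by
 * GUID through its id_table. "my_probe" and "my_remove" are hypothetical
 * callbacks:
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ HV_NIC_GUID, },
 *		{ },
 *	};
 *
 *	static struct hv_driver my_drv = {
 *		.name		= "my_drv",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *
 *	ret = vmbus_driver_register(&my_drv);
 */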

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);

/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */

#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */

#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 4 devices: the first two are for
 * Automatic Virtual Machine Activation, the third is for
 * Remote Desktop Virtualization, and the fourth is Initial
 * Machine Configuration (IMC) used only by Windows guests.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 * {c376c1c3-d276-48d2-90a9-c04748072c60}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)

#define HV_IMC_GUID \
	.guid = GUID_INIT(0xc376c1c3, 0xd276, 0x48d2, 0x90, 0xa9, \
			  0xc0, 0x47, 0x48, 0x07, 0x2c, 0x60)

/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4


/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	int (*util_init_transport)(void);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};

struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

#define IC_VERSION_NEGOTIATION_MAX_VER_COUNT 100
#define ICMSG_HDR (sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr))
#define ICMSG_NEGOTIATE_PKT_SIZE(icframe_vercnt, icmsg_vercnt) \
	(ICMSG_HDR + sizeof(struct icmsg_negotiate) + \
	 (((icframe_vercnt) + (icmsg_vercnt)) * sizeof(struct ic_version)))
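
/*
 * For example (illustrative): a negotiate packet carrying 2 framework
 * versions and 2 message versions occupies
 * ICMSG_NEGOTIATE_PKT_SIZE(2, 2) == ICMSG_HDR +
 * sizeof(struct icmsg_negotiate) + 4 * sizeof(struct ic_version) bytes.
 */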

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;

/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;

struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

struct hv_dma_range {
	dma_addr_t dma;
	u32 mapping_size;
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);

void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);
/*
 * Negotiated version with the Host.
 */

extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_service_id,
				  const guid_t *shv_host_service_id);
int vmbus_send_modifychannel(struct vmbus_channel *channel, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu);

/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}

/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}

/* Get packet length associated with descriptor */
static inline u32 hv_pkt_len(const struct vmpacket_descriptor *desc)
{
	return desc->len8 << 3;
}

struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
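
/*
 * In-place read sketch (illustrative): a channel callback typically
 * drains all pending packets with the iterator:
 *
 *	struct vmpacket_descriptor *desc;
 *
 *	foreach_vmbus_pkt(desc, channel) {
 *		void *data = hv_pkt_data(desc);
 *		u32 len = hv_pkt_datalen(desc);
 *		(process data/len)
 *	}
 */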

/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;

static inline unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
				     offset_in_page(addr);
	else
		paddr = __pa(addr);

	return paddr >> HV_HYP_PAGE_SHIFT;
}

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define HVPFN_DOWN(x)	((x) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
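
/*
 * For example (illustrative): with 64 Kbyte guest pages and 4 Kbyte
 * Hyper-V pages, NR_HV_HYP_PAGES_IN_PAGE is 16 and page_to_hvpfn()
 * multiplies the kernel PFN by 16 to obtain the first Hyper-V PFN of
 * that page.
 */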

#endif /* _HYPERV_H */