/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
#include <hyperv/hvhdk.h>

#include "hv_trace.h"

/*
 * Timeout for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout for guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55

void vmbus_isr(void);

/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
	u64 as_uint64;
	struct {
		u32 pending;
		u32 armed;
	};
};

struct hv_monitor_parameter {
	union hv_connection_id connectionid;
	u16 flagnumber;
	u16 rsvdz;
};

union hv_monitor_trigger_state {
	u32 asu32;

	struct {
		u32 group_enable:4;
		u32 rsvdz:28;
	};
};

/* struct hv_monitor_page Layout (byte offsets in hex) */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0]    | NextCheckTime[0][1] | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvd3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u64 rsvdz2[3];

	s32 next_checktime[4][32];

	u16 latency[4][32];
	u64 rsvdz3[32];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};
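
/*
 * Size check (informal): the members above add up to
 * 8 + 32 + 24 + 512 + 256 + 256 + 1024 + 1984 = 4096 bytes,
 * i.e. exactly one 4 KiB monitor page.
 */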

#define HV_HYPERCALL_PARAM_ALIGN	sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
	union hv_connection_id connectionid;
	u32 reserved;
	u32 message_type;
	u32 payload_size;
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};


enum {
	VMBUS_MESSAGE_CONNECTION_ID	= 1,
	VMBUS_MESSAGE_CONNECTION_ID_4	= 4,
	VMBUS_MESSAGE_PORT_ID		= 1,
	VMBUS_EVENT_CONNECTION_ID	= 2,
	VMBUS_EVENT_PORT_ID		= 2,
	VMBUS_MONITOR_CONNECTION_ID	= 3,
	VMBUS_MONITOR_PORT_ID		= 3,
	VMBUS_MESSAGE_SINT		= 2,
};

/*
 * Per cpu state for channel handling
 */
struct hv_per_cpu_context {
	/*
	 * SynIC pages for communicating with the host.
	 *
	 * These pages are accessible to the host partition and the hypervisor.
	 * They may be used for exchanging data with the host partition and the
	 * hypervisor even when those aren't trusted, so the guest partition
	 * must be prepared to handle malicious behavior.
	 */
	void *hyp_synic_message_page;
	void *hyp_synic_event_page;
	/*
	 * SynIC pages for communicating with the paravisor.
	 *
	 * These pages may be accessed from within the guest partition only in
	 * CoCo VMs. Neither the host partition nor the hypervisor can access
	 * these pages in that case; they are used for exchanging data with the
	 * paravisor.
	 */
	void *para_synic_message_page;
	void *para_synic_event_page;

	/*
	 * The page is only used in hv_post_message() for a TDX VM (with the
	 * paravisor) to post messages to Hyper-V: when such a VM calls
	 * HVCALL_POST_MESSAGE, it can't use the hyperv_pcpu_input_arg (which
	 * is encrypted in such a VM) as the hypercall input page, because
	 * the input page for HVCALL_POST_MESSAGE must be decrypted in such a
	 * VM, so post_msg_page (which is decrypted in hv_synic_alloc()) is
	 * introduced for this purpose. See hyperv_init() for more comments.
	 */
	void *post_msg_page;

	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we will manage the tasklet that handles event messages on a
	 * per-CPU basis.
	 */
	struct tasklet_struct msg_dpc;
};

struct hv_context {
	/* We only support running on top of Hyper-V,
	 * so at this point this really can only contain the Hyper-V ID.
	 */
	u64 guestid;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by numa node ID.
	 */
	struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
			 enum hv_message_type message_type,
			 void *payload, size_t payload_size);

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_hyp_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_hyp_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

/* Interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 pagecnt, u32 max_pkt_size,
		       bool confidential);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);

/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE. Half of HV_HYP_PAGE_SIZE is
 * used to send endpoint interrupts, and the other half to receive them.
 */
#define MAX_NUM_CHANNELS	((HV_HYP_PAGE_SIZE >> 1) << 3)
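/*
 * Informal sketch of the arithmetic above (assuming HV_HYP_PAGE_SIZE is
 * 4096 bytes): the send half is 4096 >> 1 = 2048 bytes, and each byte
 * carries 8 channel interrupt bits, so 2048 << 3 = 16384 channels.
 */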

/* The value here must be a multiple of 32 */
#define MAX_NUM_CHANNELS_SUPPORTED	256

#define MAX_CHANNEL_RELIDS					\
	max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)

enum vmbus_connect_state {
	DISCONNECTED,
	CONNECTING,
	CONNECTED,
	DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE	HV_MESSAGE_PAYLOAD_BYTE_COUNT

/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */
#define VMBUS_CONNECT_CPU	0

struct vmbus_connection {
	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion  unload_event;
	/*
	 * Represents channel interrupts. Each bit position represents a
	 * channel.  When a channel sends an interrupt via VMBUS, it finds its
	 * bit in the sendInterruptPage, sets it, and calls Hv to generate a
	 * port event. The other end receives the port event and parses the
	 * recvInterruptPage to see which bit is set.
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * 2 pages - the 1st page is for parent->child notification and
	 * the 2nd is for child->parent notification
	 */
	struct hv_monitor_page *monitor_pages[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/* Array of channel pointers, indexed by relid */
	struct vmbus_channel **channels;
	u32 relid_hiwater;

	/*
	 * An offer message is handled first on the work_queue, and then
	 * is further handled on handle_primary_chan_wq or
	 * handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;
	struct workqueue_struct *rescind_work_queue;

	/*
	 * On suspension of the vmbus, the accumulated offer messages
	 * must be dropped.
	 */
	bool ignore_any_offer_msg;

	/*
	 * The number of sub-channels and hv_sock channels that should be
	 * cleaned up upon suspend: sub-channels will be re-created upon
	 * resume, and hv_sock channels should not survive suspend.
	 */
	atomic_t nr_chan_close_on_suspend;
	/*
	 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
	 * drop to zero.
	 */
	struct completion ready_for_suspend_event;

	/*
	 * Completed once the host has offered all boot-time channels.
	 * Note that some offers may still be being processed on a workqueue.
	 */
	struct completion all_offers_delivered_event;
};


struct vmbus_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglist_entry;

	/* The message itself */
	unsigned char msg[];
};


extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);

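/*
 * Sender side of the interrupt-page protocol described in
 * struct vmbus_connection above: set this channel's bit in the send page;
 * the other end parses its receive page to see which bit is set.
 */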
static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}

enum vmbus_message_handler_type {
	/* The related handler can sleep. */
	VMHT_BLOCKING = 0,

	/* The related handler must NOT sleep. */
	VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	enum vmbus_message_handler_type handler_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
	u32 min_payload_len;
};

extern const struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT];


/* General vmbus interface */

bool vmbus_is_confidential(void);

#if IS_ENABLED(CONFIG_HYPERV_VMBUS)
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * If we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (!try_cmpxchg(&msg->header.message_type,
			 &old_msg_type, HVMSG_NONE))
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (i.e. setting it to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		if (vmbus_is_confidential())
			hv_para_set_synic_register(HV_MSR_EOM, 0);
		else
			hv_set_msr(HV_MSR_EOM, 0);
	}
}

extern int vmbus_interrupt;
extern int vmbus_irq;
#endif /* CONFIG_HYPERV_VMBUS */

struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
			   struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

void vmbus_channel_map_relid(struct vmbus_channel *channel);
void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
int hv_kvp_init_transport(void);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
int hv_vss_init_transport(void);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
				   void (*cb)(void *))
{
	if (!channel)
		return;
	cb(channel);
}

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
	HVUTIL_READY,            /* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
	HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
	HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};
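/*
 * Rough lifecycle suggested by the per-state comments above (a sketch,
 * not an exhaustive state machine): DEVICE_INIT -> READY once userspace
 * registers, then READY -> HOSTMSG_RECEIVED -> USERSPACE_REQ ->
 * USERSPACE_RECV -> back to READY for each host transaction, and
 * DEVICE_DYING while the driver is being unloaded.
 */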

enum delay {
	INTERRUPT_DELAY = 0,
	MESSAGE_DELAY   = 1,
};

extern const struct vmbus_device vmbus_devs[];

static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
{
	return vmbus_devs[channel->device_id].perf_device;
}

static inline size_t hv_dev_ring_size(struct vmbus_channel *channel)
{
	return vmbus_devs[channel->device_id].pref_ring_size;
}

static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;

	lockdep_assert_held(&vmbus_connection.channel_mutex);
	/*
	 * List additions/deletions as well as updates of the target CPUs are
	 * protected by channel_mutex.
	 */
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!hv_is_perf_channel(channel))
			continue;
		if (channel->target_cpu == cpu)
			return true;
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu)
				return true;
		}
	}
	return false;
}

static inline void hv_set_allocated_cpu(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_clear_allocated_cpu(unsigned int cpu)
{
	if (hv_is_allocated_cpu(cpu))
		return;
	cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_update_allocated_cpus(unsigned int old_cpu,
					  unsigned int new_cpu)
{
	hv_set_allocated_cpu(new_cpu);
	hv_clear_allocated_cpu(old_cpu);
}

#ifdef CONFIG_HYPERV_TESTING

int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);

#else /* CONFIG_HYPERV_TESTING */

static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
static inline void hv_debug_rm_all_dir(void) {};
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
				       enum delay delay_type) {};
static inline int hv_debug_init(void)
{
	return -1;
}

static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
	return -1;
}

#endif /* CONFIG_HYPERV_TESTING */

/* Create and remove sysfs entry for memory mapped ring buffers for a channel */
int hv_create_ring_sysfs(struct vmbus_channel *channel,
			 int (*hv_mmap_prepare_ring_buffer)(struct vmbus_channel *channel,
							    struct vm_area_desc *desc));
int hv_remove_ring_sysfs(struct vmbus_channel *channel);

#endif /* _HYPERV_VMBUS_H */