/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/mshyperv.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
#include <hyperv/hvhdk.h>

#include "hv_trace.h"
/*
 * Timeout (in seconds) for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout (in seconds) for the guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55
void vmbus_isr(void);

/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
	u64 as_uint64;
	struct {
		u32 pending;
		u32 armed;
	};
};

struct hv_monitor_parameter {
	union hv_connection_id connectionid;
	u16 flagnumber;
	u16 rsvdz;
};

union hv_monitor_trigger_state {
	u32 asu32;

	struct {
		u32 group_enable:4;
		u32 rsvdz:28;
	};
};
/* struct hv_monitor_page layout (offsets in hex) */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0]    | NextCheckTime[0][1] | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvd3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u64 rsvdz2[3];

	s32 next_checktime[4][32];

	u16 latency[4][32];
	u64 rsvdz3[32];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};

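/*
 * A compile-time sketch of the layout documented above, using the hex
 * offsets from the table. Illustrative only; it assumes the compiler
 * adds no padding, which holds since every field is naturally aligned.
 */
static_assert(offsetof(struct hv_monitor_page, trigger_group) == 0x8);
static_assert(offsetof(struct hv_monitor_page, next_checktime) == 0x40);
static_assert(offsetof(struct hv_monitor_page, latency) == 0x240);
static_assert(offsetof(struct hv_monitor_page, parameter) == 0x440);
static_assert(offsetof(struct hv_monitor_page, rsvdz4) == 0x840);
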
#define HV_HYPERCALL_PARAM_ALIGN	sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
	union hv_connection_id connectionid;
	u32 reserved;
	u32 message_type;
	u32 payload_size;
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};

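/*
 * A minimal sketch (the helper is hypothetical, not part of this header's
 * API) of how the fields above are populated; the real work happens in
 * hv_post_message(), which must also place the structure in memory the
 * hypervisor is allowed to read (see the post_msg_page comment below).
 */
static inline void example_fill_post_message(struct hv_input_post_message *msg,
					     union hv_connection_id id, u32 type,
					     const void *payload, u32 payload_size)
{
	msg->connectionid = id;
	msg->reserved = 0;
	msg->message_type = type;
	msg->payload_size = payload_size;
	/* The payload must fit in HV_MESSAGE_PAYLOAD_QWORD_COUNT qwords. */
	memcpy(msg->payload, payload, payload_size);
}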

enum {
	VMBUS_MESSAGE_CONNECTION_ID	= 1,
	VMBUS_MESSAGE_CONNECTION_ID_4	= 4,
	VMBUS_MESSAGE_PORT_ID		= 1,
	VMBUS_EVENT_CONNECTION_ID	= 2,
	VMBUS_EVENT_PORT_ID		= 2,
	VMBUS_MONITOR_CONNECTION_ID	= 3,
	VMBUS_MONITOR_PORT_ID		= 3,
	VMBUS_MESSAGE_SINT		= 2,
};
120 
121 /*
122  * Per cpu state for channel handling
123  */
124 struct hv_per_cpu_context {
125 	/*
126 	 * SynIC pages for communicating with the host.
127 	 *
128 	 * These pages are accessible to the host partition and the hypervisor.
129 	 * They may be used for exchanging data with the host partition and the
130 	 * hypervisor even when they aren't trusted yet the guest partition
131 	 * must be prepared to handle the malicious behavior.
132 	 */
133 	void *hyp_synic_message_page;
134 	void *hyp_synic_event_page;
135 	/*
136 	 * SynIC pages for communicating with the paravisor.
137 	 *
138 	 * These pages may be accessed from within the guest partition only in
139 	 * CoCo VMs. Neither the host partition nor the hypervisor can access
140 	 * these pages in that case; they are used for exchanging data with the
141 	 * paravisor.
142 	 */
143 	void *para_synic_message_page;
144 	void *para_synic_event_page;
145 
146 	/*
147 	 * The page is only used in hv_post_message() for a TDX VM (with the
148 	 * paravisor) to post a messages to Hyper-V: when such a VM calls
149 	 * HVCALL_POST_MESSAGE, it can't use the hyperv_pcpu_input_arg (which
150 	 * is encrypted in such a VM) as the hypercall input page, because
151 	 * the input page for HVCALL_POST_MESSAGE must be decrypted in such a
152 	 * VM, so post_msg_page (which is decrypted in hv_synic_alloc()) is
153 	 * introduced for this purpose. See hyperv_init() for more comments.
154 	 */
155 	void *post_msg_page;
156 
157 	/*
158 	 * Starting with win8, we can take channel interrupts on any CPU;
159 	 * we will manage the tasklet that handles events messages on a per CPU
160 	 * basis.
161 	 */
162 	struct tasklet_struct msg_dpc;
163 };

struct hv_context {
	/*
	 * We only support running on top of Hyper-V, so at this point
	 * this can only contain the Hyper-V ID.
	 */
	u64 guestid;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by NUMA node ID.
	 */
	struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
			   enum hv_message_type message_type,
			   void *payload, size_t payload_size);

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_hyp_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_hyp_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

/* Interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 pagecnt, u32 max_pkt_size,
		       bool confidential);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count,
			u64 requestid, u64 *trans_id);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);

/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE: one half of the page is used
 * to send endpoint interrupts, and the other half to receive them.
 */
#define MAX_NUM_CHANNELS	((HV_HYP_PAGE_SIZE >> 1) << 3)

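/*
 * A compile-time restatement of the arithmetic above (illustrative):
 * one bit per channel in half the page. With the usual 4096-byte
 * HV_HYP_PAGE_SIZE this evaluates to 16384.
 */
static_assert(MAX_NUM_CHANNELS == (HV_HYP_PAGE_SIZE / 2) * 8);
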
/* The value here must be a multiple of 32 */
#define MAX_NUM_CHANNELS_SUPPORTED	256

#define MAX_CHANNEL_RELIDS					\
	max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)

enum vmbus_connect_state {
	DISCONNECTED,
	CONNECTING,
	CONNECTED,
	DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE	HV_MESSAGE_PAYLOAD_BYTE_COUNT

/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */
#define VMBUS_CONNECT_CPU	0

struct vmbus_connection {
	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion unload_event;
	/*
	 * Represents channel interrupts. Each bit position represents a
	 * channel. When a channel sends an interrupt via VMBUS, it finds its
	 * bit in the sendInterruptPage, sets it and calls Hv to generate a
	 * port event. The other end receives the port event and parses the
	 * recvInterruptPage to see which bit is set.
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * 2 pages - the 1st page is for parent->child notification and the
	 * 2nd is for child->parent notification
	 */
	struct hv_monitor_page *monitor_pages[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/* Array of channels */
	struct vmbus_channel **channels;

	/*
	 * An offer message is handled first on the work_queue, and then
	 * is further handled on handle_primary_chan_wq or
	 * handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;
	struct workqueue_struct *rescind_work_queue;

	/*
	 * When the vmbus is suspended, the accumulated offer messages
	 * must be dropped.
	 */
	bool ignore_any_offer_msg;

	/*
	 * The number of sub-channels and hv_sock channels that should be
	 * cleaned up upon suspend: sub-channels will be re-created upon
	 * resume, and hv_sock channels should not survive suspend.
	 */
	atomic_t nr_chan_close_on_suspend;
	/*
	 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
	 * drop to zero.
	 */
	struct completion ready_for_suspend_event;

	/*
	 * Completed once the host has offered all boot-time channels.
	 * Note that some channels may still be being processed on a
	 * workqueue.
	 */
	struct completion all_offers_delivered_event;
};


struct vmbus_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglist_entry;

	/* The message itself */
	unsigned char msg[];
};


extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);

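/*
 * Signal the host that the channel identified by relid has pending work:
 * atomically set the channel's bit in the guest-to-host half of the
 * interrupt page. sync_set_bit() is used because the page is shared
 * with the host.
 */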
static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}

enum vmbus_message_handler_type {
	/* The related handler can sleep. */
	VMHT_BLOCKING = 0,

	/* The related handler must NOT sleep. */
	VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	enum vmbus_message_handler_type handler_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
	u32 min_payload_len;
};

extern const struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT];


/* General vmbus interface */

bool vmbus_is_confidential(void);

#if IS_ENABLED(CONFIG_HYPERV_VMBUS)
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * If we blindly write msg->header.message_type we're going to lose
	 * it. We can still lose a message of the same type but we count on
	 * the fact that there can only be one CHANNELMSG_UNLOAD_RESPONSE and
	 * we don't care about other messages on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to message_type (i.e. setting it to
	 * HVMSG_NONE) happens before we read msg_pending and write the
	 * EOM register. Otherwise, the EOM will not make the hypervisor
	 * deliver any more messages since there is no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause a message-queue rescan and so may
		 * deliver another msg from the hypervisor.
		 */
		if (vmbus_is_confidential())
			hv_para_set_synic_register(HV_MSR_EOM, 0);
		else
			hv_set_msr(HV_MSR_EOM, 0);
	}
}

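/*
 * A minimal sketch of the intended call pattern, loosely following
 * vmbus_on_msg_dpc(); the helper name is hypothetical. Snapshot the
 * message type, consume the payload, then let vmbus_signal_eom() free
 * the slot and EOM if another message is pending.
 */
static inline void example_consume_message(struct hv_message *msg)
{
	const u32 msgtype = READ_ONCE(msg->header.message_type);

	if (msgtype == HVMSG_NONE)
		return;		/* The slot is empty. */

	/* ... dispatch msg->u.payload here ... */

	vmbus_signal_eom(msg, msgtype);
}
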
extern int vmbus_interrupt;
extern int vmbus_irq;
#endif /* CONFIG_HYPERV_VMBUS */

struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
			   struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

void vmbus_channel_map_relid(struct vmbus_channel *channel);
void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
int hv_kvp_init_transport(void);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
int hv_vss_init_transport(void);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
				   void (*cb)(void *))
{
	if (!channel)
		return;
	cb(channel);
}

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
	HVUTIL_READY,            /* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
	HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
	HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};
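
/*
 * Typical lifecycle (a sketch; the authoritative transitions live in the
 * util drivers, e.g. hv_kvp.c): DEVICE_INIT -> READY when the userspace
 * daemon registers; READY -> HOSTMSG_RECEIVED -> USERSPACE_REQ ->
 * USERSPACE_RECV -> READY for each transaction; any state -> DEVICE_DYING
 * on driver unload.
 */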

enum delay {
	INTERRUPT_DELAY = 0,
	MESSAGE_DELAY   = 1,
};

extern const struct vmbus_device vmbus_devs[];

static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
{
	return vmbus_devs[channel->device_id].perf_device;
}

static inline size_t hv_dev_ring_size(struct vmbus_channel *channel)
{
	return vmbus_devs[channel->device_id].pref_ring_size;
}

static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;

	lockdep_assert_held(&vmbus_connection.channel_mutex);
	/*
	 * List additions/deletions as well as updates of the target CPUs are
	 * protected by channel_mutex.
	 */
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!hv_is_perf_channel(channel))
			continue;
		if (channel->target_cpu == cpu)
			return true;
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu)
				return true;
		}
	}
	return false;
}

static inline void hv_set_allocated_cpu(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

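/*
 * Note the asymmetry with hv_set_allocated_cpu(): the CPU's bit in the
 * NUMA map is cleared only if no perf channel or sub-channel still
 * targets this CPU (hence the early return below).
 */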
static inline void hv_clear_allocated_cpu(unsigned int cpu)
{
	if (hv_is_allocated_cpu(cpu))
		return;
	cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_update_allocated_cpus(unsigned int old_cpu,
					    unsigned int new_cpu)
{
	hv_set_allocated_cpu(new_cpu);
	hv_clear_allocated_cpu(old_cpu);
}

#ifdef CONFIG_HYPERV_TESTING

int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);

#else /* CONFIG_HYPERV_TESTING */

static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
static inline void hv_debug_rm_all_dir(void) {};
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
				       enum delay delay_type) {};
static inline int hv_debug_init(void)
{
	return -1;
}

static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
	return -1;
}

#endif /* CONFIG_HYPERV_TESTING */

/* Create and remove sysfs entry for memory mapped ring buffers for a channel */
int hv_create_ring_sysfs(struct vmbus_channel *channel,
			 int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
						    struct vm_area_struct *vma));
int hv_remove_ring_sysfs(struct vmbus_channel *channel);

#endif /* _HYPERV_VMBUS_H */