xref: /linux/drivers/hv/mshv_synic.c (revision a5210135489ae7bc1ef1cb4a8157361dd7b468cd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2023, Microsoft Corporation.
4  *
5  * mshv_root module's main interrupt handler and associated functionality.
6  *
7  * Authors: Microsoft Linux virtualization team
8  */
9 
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/mm.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/cpuhotplug.h>
16 #include <linux/reboot.h>
17 #include <asm/mshyperv.h>
18 #include <linux/acpi.h>
19 
20 #include "mshv_eventfd.h"
21 #include "mshv.h"
22 
/* Token returned by cpuhp_setup_state(); needed later to remove the state. */
static int synic_cpuhp_online;
/* Per-CPU record of the mapped SynIC pages (message / event flags / ring). */
static struct hv_synic_pages __percpu *synic_pages;
static int mshv_sint_vector = -1; /* hwirq for the SynIC SINTs */
static int mshv_sint_irq = -1; /* Linux IRQ for mshv_sint_vector */
27 
/*
 * Pop the next queued port ID from this CPU's SynIC event ring for the
 * given SINT, advancing the locally tracked tail index.
 *
 * Returns the port ID, or 0 when the ring is empty (0 is not a valid
 * queued value here). Must run on the CPU that owns the ring (uses
 * this_cpu_ptr), i.e. from the SynIC interrupt path.
 */
static u32 synic_event_ring_get_queued_port(u32 sint_index)
{
	struct hv_synic_event_ring_page **event_ring_page;
	/* volatile: the ring is shared with the hypervisor and mutated by it */
	volatile struct hv_synic_event_ring *ring;
	struct hv_synic_pages *spages;
	u8 **synic_eventring_tail;
	u32 message;
	u8 tail;

	spages = this_cpu_ptr(synic_pages);
	event_ring_page = &spages->synic_event_ring_page;
	synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);

	if (unlikely(!*synic_eventring_tail)) {
		pr_debug("Missing synic event ring tail!\n");
		return 0;
	}
	/* Tail is tracked per SINT; the hypervisor owns the head. */
	tail = (*synic_eventring_tail)[sint_index];

	if (unlikely(!*event_ring_page)) {
		pr_debug("Missing synic event ring page!\n");
		return 0;
	}

	ring = &(*event_ring_page)->sint_event_ring[sint_index];

	/*
	 * Get the message.
	 */
	message = ring->data[tail];

	if (!message) {
		if (ring->ring_full) {
			/*
			 * Ring is marked full, but we would have consumed all
			 * the messages. Notify the hypervisor that ring is now
			 * empty and check again.
			 */
			ring->ring_full = 0;
			hv_call_notify_port_ring_empty(sint_index);
			message = ring->data[tail];
		}

		if (!message) {
			ring->signal_masked = 0;
			/*
			 * Unmask the signal and sync with hypervisor
			 * before one last check for any message.
			 */
			mb();
			message = ring->data[tail];

			/*
			 * Ok, lets bail out.
			 */
			if (!message)
				return 0;
		}

		/* A message raced in after unmasking; re-mask while we drain. */
		ring->signal_masked = 1;
	}

	/*
	 * Clear the message in the ring buffer.
	 */
	ring->data[tail] = 0;

	/* Advance and wrap the tail index. */
	if (++tail == HV_SYNIC_EVENT_RING_MESSAGE_COUNT)
		tail = 0;

	(*synic_eventring_tail)[sint_index] = tail;

	return message;
}
102 
103 static bool
mshv_doorbell_isr(struct hv_message * msg)104 mshv_doorbell_isr(struct hv_message *msg)
105 {
106 	struct hv_notification_message_payload *notification;
107 	u32 port;
108 
109 	if (msg->header.message_type != HVMSG_SYNIC_SINT_INTERCEPT)
110 		return false;
111 
112 	notification = (struct hv_notification_message_payload *)msg->u.payload;
113 	if (notification->sint_index != HV_SYNIC_DOORBELL_SINT_INDEX)
114 		return false;
115 
116 	while ((port = synic_event_ring_get_queued_port(HV_SYNIC_DOORBELL_SINT_INDEX))) {
117 		struct port_table_info ptinfo = { 0 };
118 
119 		if (mshv_portid_lookup(port, &ptinfo)) {
120 			pr_debug("Failed to get port info from port_table!\n");
121 			continue;
122 		}
123 
124 		if (ptinfo.hv_port_type != HV_PORT_TYPE_DOORBELL) {
125 			pr_debug("Not a doorbell port!, port: %d, port_type: %d\n",
126 				 port, ptinfo.hv_port_type);
127 			continue;
128 		}
129 
130 		/* Invoke the callback */
131 		ptinfo.hv_port_doorbell.doorbell_cb(port,
132 						 ptinfo.hv_port_doorbell.data);
133 	}
134 
135 	return true;
136 }
137 
mshv_async_call_completion_isr(struct hv_message * msg)138 static bool mshv_async_call_completion_isr(struct hv_message *msg)
139 {
140 	bool handled = false;
141 	struct hv_async_completion_message_payload *async_msg;
142 	struct mshv_partition *partition;
143 	u64 partition_id;
144 
145 	if (msg->header.message_type != HVMSG_ASYNC_CALL_COMPLETION)
146 		goto out;
147 
148 	async_msg =
149 		(struct hv_async_completion_message_payload *)msg->u.payload;
150 
151 	partition_id = async_msg->partition_id;
152 
153 	/*
154 	 * Hold this lock for the rest of the isr, because the partition could
155 	 * be released anytime.
156 	 * e.g. the MSHV_RUN_VP thread could wake on another cpu; it could
157 	 * release the partition unless we hold this!
158 	 */
159 	rcu_read_lock();
160 
161 	partition = mshv_partition_find(partition_id);
162 
163 	if (unlikely(!partition)) {
164 		pr_debug("failed to find partition %llu\n", partition_id);
165 		goto unlock_out;
166 	}
167 
168 	partition->async_hypercall_status = async_msg->status;
169 	complete(&partition->async_hypercall);
170 
171 	handled = true;
172 
173 unlock_out:
174 	rcu_read_unlock();
175 out:
176 	return handled;
177 }
178 
/*
 * Wake a VP thread blocked on its suspend queue: count the signal, flag
 * that the wake-up originated from the hypervisor, then wake the waiter.
 */
static void kick_vp(struct mshv_vp *vp)
{
	atomic64_inc(&vp->run.vp_signaled_count);
	/* Set the flag before wake_up() so the woken thread observes it. */
	vp->run.kicked_by_hv = 1;
	wake_up(&vp->run.vp_suspend_queue);
}
185 
/*
 * Handle a HVMSG_SCHEDULER_VP_SIGNAL_BITSET scheduler message: kick every
 * VP of the target partition whose bit is set in the sparse-4K VP set.
 *
 * Runs in the SynIC interrupt path; the partition is pinned under RCU so
 * a concurrent release cannot free it while VPs are being signaled.
 */
static void
handle_bitset_message(const struct hv_vp_signal_bitset_scheduler_message *msg)
{
	int bank_idx, vps_signaled = 0, bank_mask_size;
	struct mshv_partition *partition;
	const struct hv_vpset *vpset;
	const u64 *bank_contents;
	u64 partition_id = msg->partition_id;

	/* Only the sparse-4K set format is supported here. */
	if (msg->vp_bitset.bitset.format != HV_GENERIC_SET_SPARSE_4K) {
		pr_debug("scheduler message format is not HV_GENERIC_SET_SPARSE_4K");
		return;
	}

	if (msg->vp_count == 0) {
		pr_debug("scheduler message with no VP specified");
		return;
	}

	rcu_read_lock();

	partition = mshv_partition_find(partition_id);
	if (unlikely(!partition)) {
		pr_debug("failed to find partition %llu\n", partition_id);
		goto unlock_out;
	}

	vpset = &msg->vp_bitset.bitset;

	bank_idx = -1;
	bank_contents = vpset->bank_contents;
	bank_mask_size = sizeof(vpset->valid_bank_mask) * BITS_PER_BYTE;

	/* Outer loop: walk the set bits of the valid-bank mask. */
	while (true) {
		int vp_bank_idx = -1;
		int vp_bank_size = sizeof(*bank_contents) * BITS_PER_BYTE;
		int vp_index;

		bank_idx = find_next_bit((unsigned long *)&vpset->valid_bank_mask,
					 bank_mask_size, bank_idx + 1);
		if (bank_idx == bank_mask_size)
			break;

		/* Inner loop: walk the set bits within this bank. */
		while (true) {
			struct mshv_vp *vp;

			vp_bank_idx = find_next_bit((unsigned long *)bank_contents,
						    vp_bank_size, vp_bank_idx + 1);
			if (vp_bank_idx == vp_bank_size)
				break;

			/* VP index = bank number * bits-per-bank + bit offset. */
			vp_index = (bank_idx * vp_bank_size) + vp_bank_idx;

			/* This shouldn't happen, but just in case. */
			if (unlikely(vp_index >= MSHV_MAX_VPS)) {
				pr_debug("VP index %u out of bounds\n",
					 vp_index);
				goto unlock_out;
			}

			vp = partition->pt_vp_array[vp_index];
			if (unlikely(!vp)) {
				pr_debug("failed to find VP %u\n", vp_index);
				goto unlock_out;
			}

			kick_vp(vp);
			vps_signaled++;
		}

		/* bank_contents holds one u64 per *valid* bank; advance it. */
		bank_contents++;
	}

unlock_out:
	rcu_read_unlock();

	if (vps_signaled != msg->vp_count)
		pr_debug("asked to signal %u VPs but only did %u\n",
			 msg->vp_count, vps_signaled);
}
266 
/*
 * Handle a HVMSG_SCHEDULER_VP_SIGNAL_PAIR scheduler message: kick each
 * (partition, VP) pair listed in the message. Consecutive entries for the
 * same partition reuse the previous partition lookup.
 *
 * Runs in the SynIC interrupt path under rcu_read_lock() so the looked-up
 * partition cannot be released while in use.
 */
static void
handle_pair_message(const struct hv_vp_signal_pair_scheduler_message *msg)
{
	struct mshv_partition *partition = NULL;
	struct mshv_vp *vp;
	int idx;

	rcu_read_lock();

	for (idx = 0; idx < msg->vp_count; idx++) {
		u64 partition_id = msg->partition_ids[idx];
		u32 vp_index = msg->vp_indexes[idx];

		/* Re-look-up only when the partition changes between entries. */
		if (idx == 0 || partition->pt_id != partition_id) {
			partition = mshv_partition_find(partition_id);
			if (unlikely(!partition)) {
				pr_debug("failed to find partition %llu\n",
					 partition_id);
				break;
			}
		}

		/* This shouldn't happen, but just in case. */
		if (unlikely(vp_index >= MSHV_MAX_VPS)) {
			pr_debug("VP index %u out of bounds\n", vp_index);
			break;
		}

		vp = partition->pt_vp_array[vp_index];
		if (!vp) {
			pr_debug("failed to find VP %u\n", vp_index);
			break;
		}

		kick_vp(vp);
	}

	rcu_read_unlock();
}
306 
307 static bool
mshv_scheduler_isr(struct hv_message * msg)308 mshv_scheduler_isr(struct hv_message *msg)
309 {
310 	if (msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_BITSET &&
311 	    msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_PAIR)
312 		return false;
313 
314 	if (msg->header.message_type == HVMSG_SCHEDULER_VP_SIGNAL_BITSET)
315 		handle_bitset_message((struct hv_vp_signal_bitset_scheduler_message *)
316 				      msg->u.payload);
317 	else
318 		handle_pair_message((struct hv_vp_signal_pair_scheduler_message *)
319 				    msg->u.payload);
320 
321 	return true;
322 }
323 
324 static bool
mshv_intercept_isr(struct hv_message * msg)325 mshv_intercept_isr(struct hv_message *msg)
326 {
327 	struct mshv_partition *partition;
328 	bool handled = false;
329 	struct mshv_vp *vp;
330 	u64 partition_id;
331 	u32 vp_index;
332 
333 	partition_id = msg->header.sender;
334 
335 	rcu_read_lock();
336 
337 	partition = mshv_partition_find(partition_id);
338 	if (unlikely(!partition)) {
339 		pr_debug("failed to find partition %llu\n",
340 			 partition_id);
341 		goto unlock_out;
342 	}
343 
344 	if (msg->header.message_type == HVMSG_X64_APIC_EOI) {
345 		/*
346 		 * Check if this gsi is registered in the
347 		 * ack_notifier list and invoke the callback
348 		 * if registered.
349 		 */
350 
351 		/*
352 		 * If there is a notifier, the ack callback is supposed
353 		 * to handle the VMEXIT. So we need not pass this message
354 		 * to vcpu thread.
355 		 */
356 		struct hv_x64_apic_eoi_message *eoi_msg =
357 			(struct hv_x64_apic_eoi_message *)&msg->u.payload[0];
358 
359 		if (mshv_notify_acked_gsi(partition, eoi_msg->interrupt_vector)) {
360 			handled = true;
361 			goto unlock_out;
362 		}
363 	}
364 
365 	/*
366 	 * We should get an opaque intercept message here for all intercept
367 	 * messages, since we're using the mapped VP intercept message page.
368 	 *
369 	 * The intercept message will have been placed in intercept message
370 	 * page at this point.
371 	 *
372 	 * Make sure the message type matches our expectation.
373 	 */
374 	if (msg->header.message_type != HVMSG_OPAQUE_INTERCEPT) {
375 		pr_debug("wrong message type %d", msg->header.message_type);
376 		goto unlock_out;
377 	}
378 
379 	/*
380 	 * Since we directly index the vp, and it has to exist for us to be here
381 	 * (because the vp is only deleted when the partition is), no additional
382 	 * locking is needed here
383 	 */
384 	vp_index =
385 	       ((struct hv_opaque_intercept_message *)msg->u.payload)->vp_index;
386 	vp = partition->pt_vp_array[vp_index];
387 	if (unlikely(!vp)) {
388 		pr_debug("failed to find VP %u\n", vp_index);
389 		goto unlock_out;
390 	}
391 
392 	kick_vp(vp);
393 
394 	handled = true;
395 
396 unlock_out:
397 	rcu_read_unlock();
398 
399 	return handled;
400 }
401 
/*
 * Main SynIC interrupt handler. Reads the message queued in the
 * interception SINT slot of this CPU's SynIC message page and tries each
 * message-class handler in turn (doorbell, scheduler, async-hypercall
 * completion, intercept). On success the slot is cleared and, if another
 * message is pending, an EOM is written so the hypervisor delivers it.
 */
void mshv_isr(void)
{
	struct hv_synic_pages *spages = this_cpu_ptr(synic_pages);
	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
	struct hv_message *msg;
	bool handled;

	if (unlikely(!(*msg_page))) {
		pr_debug("Missing synic page!\n");
		return;
	}

	msg = &((*msg_page)->sint_message[HV_SYNIC_INTERCEPTION_SINT_INDEX]);

	/*
	 * If the type isn't set, there isn't really a message;
	 * it may be some other hyperv interrupt
	 */
	if (msg->header.message_type == HVMSG_NONE)
		return;

	/* Try handlers from most to least specific until one claims it. */
	handled = mshv_doorbell_isr(msg);

	if (!handled)
		handled = mshv_scheduler_isr(msg);

	if (!handled)
		handled = mshv_async_call_completion_isr(msg);

	if (!handled)
		handled = mshv_intercept_isr(msg);

	if (handled) {
		/*
		 * Acknowledge message with hypervisor if another message is
		 * pending.
		 */
		msg->header.message_type = HVMSG_NONE;
		/*
		 * Ensure the write is complete so the hypervisor will deliver
		 * the next message if available.
		 */
		mb();
		if (msg->header.message_flags.msg_pending)
			hv_set_non_nested_msr(HV_MSR_EOM, 0);
	} else {
		pr_warn_once("%s: unknown message type 0x%x\n", __func__,
			     msg->header.message_type);
	}
}
452 
/*
 * cpuhp "online" callback: map and enable this CPU's SynIC pages
 * (message page, event flags page, event ring page), enable the per-CPU
 * SINT IRQ if one was allocated, program the interception and doorbell
 * SINTs, and finally set the global SynIC enable bit.
 *
 * Returns 0 on success or -EFAULT on mapping failure, after unwinding any
 * pages that were already enabled.
 */
static int mshv_synic_cpu_init(unsigned int cpu)
{
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sirbp sirbp;
	union hv_synic_sint sint;
	union hv_synic_scontrol sctrl;
	struct hv_synic_pages *spages = this_cpu_ptr(synic_pages);
	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
	struct hv_synic_event_flags_page **event_flags_page =
			&spages->synic_event_flags_page;
	struct hv_synic_event_ring_page **event_ring_page =
			&spages->synic_event_ring_page;

	/* Setup the Synic's message page */
	simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
	simp.simp_enabled = true;
	*msg_page = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
			     HV_HYP_PAGE_SIZE,
			     MEMREMAP_WB);

	if (!(*msg_page))
		return -EFAULT;

	/* Enable via MSR only after the mapping succeeded. */
	hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event flags page */
	siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = true;
	/*
	 * NOTE(review): the message page above uses HV_HYP_PAGE_SHIFT /
	 * HV_HYP_PAGE_SIZE while these two use PAGE_SHIFT / PAGE_SIZE —
	 * confirm this is intentional on configs where they differ.
	 */
	*event_flags_page = memremap(siefp.base_siefp_gpa << PAGE_SHIFT,
				     PAGE_SIZE, MEMREMAP_WB);

	if (!(*event_flags_page))
		goto cleanup;

	hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);

	/* Setup the Synic's event ring page */
	sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
	sirbp.sirbp_enabled = true;
	*event_ring_page = memremap(sirbp.base_sirbp_gpa << PAGE_SHIFT,
				    PAGE_SIZE, MEMREMAP_WB);

	if (!(*event_ring_page))
		goto cleanup;

	hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);

	if (mshv_sint_irq != -1)
		enable_percpu_irq(mshv_sint_irq, 0);

	/* Enable intercepts */
	sint.as_uint64 = 0;
	sint.vector = mshv_sint_vector;
	sint.masked = false;
	sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
			      sint.as_uint64);

	/* Doorbell SINT */
	sint.as_uint64 = 0;
	sint.vector = mshv_sint_vector;
	sint.masked = false;
	sint.as_intercept = 1;
	sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
			      sint.as_uint64);

	/* Enable global synic bit */
	sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
	sctrl.enable = 1;
	hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);

	return 0;

cleanup:
	/* Unwind in reverse; only pages that were mapped are disabled. */
	if (*event_ring_page) {
		sirbp.sirbp_enabled = false;
		hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
		memunmap(*event_ring_page);
	}
	if (*event_flags_page) {
		siefp.siefp_enabled = false;
		hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
		memunmap(*event_flags_page);
	}
	if (*msg_page) {
		simp.simp_enabled = false;
		hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
		memunmap(*msg_page);
	}

	return -EFAULT;
}
547 
/*
 * cpuhp "offline" callback: the inverse of mshv_synic_cpu_init(). Masks
 * both SINTs, disables the per-CPU IRQ, disables and unmaps each SynIC
 * page, and finally clears the global SynIC enable bit.
 *
 * Order matters: SINTs are masked before the pages they reference are
 * torn down, and the global enable bit is cleared last.
 */
static int mshv_synic_cpu_exit(unsigned int cpu)
{
	union hv_synic_sint sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sirbp sirbp;
	union hv_synic_scontrol sctrl;
	struct hv_synic_pages *spages = this_cpu_ptr(synic_pages);
	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
	struct hv_synic_event_flags_page **event_flags_page =
		&spages->synic_event_flags_page;
	struct hv_synic_event_ring_page **event_ring_page =
		&spages->synic_event_ring_page;

	/* Disable the interrupt */
	sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX);
	sint.masked = true;
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
			      sint.as_uint64);

	/* Disable Doorbell SINT */
	sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX);
	sint.masked = true;
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
			      sint.as_uint64);

	if (mshv_sint_irq != -1)
		disable_percpu_irq(mshv_sint_irq);

	/* Disable Synic's event ring page */
	sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
	sirbp.sirbp_enabled = false;
	hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
	memunmap(*event_ring_page);

	/* Disable Synic's event flags page */
	siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = false;
	hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
	memunmap(*event_flags_page);

	/* Disable Synic's message page */
	simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
	simp.simp_enabled = false;
	hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
	memunmap(*msg_page);

	/* Disable global synic bit */
	sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
	sctrl.enable = 0;
	hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);

	return 0;
}
602 
/*
 * Register a doorbell for @partition_id: allocate a port-table entry
 * holding @doorbell_cb/@data, create a hypervisor port targeting the
 * doorbell SINT on any VP, and connect it to the guest physical address
 * @gpa with trigger value @val and @flags.
 *
 * Returns the new doorbell ID (the port ID) on success, negative errno
 * on failure. On failure all partially-created state is rolled back.
 */
int
mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb, void *data,
		       u64 gpa, u64 val, u64 flags)
{
	struct hv_connection_info connection_info = { 0 };
	union hv_connection_id connection_id = { 0 };
	struct port_table_info *port_table_info;
	struct hv_port_info port_info = { 0 };
	union hv_port_id port_id = { 0 };
	int ret;

	port_table_info = kmalloc_obj(*port_table_info);
	if (!port_table_info)
		return -ENOMEM;

	port_table_info->hv_port_type = HV_PORT_TYPE_DOORBELL;
	port_table_info->hv_port_doorbell.doorbell_cb = doorbell_cb;
	port_table_info->hv_port_doorbell.data = data;
	/*
	 * Ownership of port_table_info passes to the port table here;
	 * presumably mshv_portid_free() releases it on the error paths
	 * below — NOTE(review): confirm against mshv_eventfd.c.
	 */
	ret = mshv_portid_alloc(port_table_info);
	if (ret < 0) {
		kfree(port_table_info);
		return ret;
	}

	port_id.u.id = ret;
	port_info.port_type = HV_PORT_TYPE_DOORBELL;
	port_info.doorbell_port_info.target_sint = HV_SYNIC_DOORBELL_SINT_INDEX;
	port_info.doorbell_port_info.target_vp = HV_ANY_VP;
	ret = hv_call_create_port(hv_current_partition_id, port_id, partition_id,
				  &port_info,
				  0, 0, NUMA_NO_NODE);

	if (ret < 0) {
		mshv_portid_free(port_id.u.id);
		return ret;
	}

	connection_id.u.id = port_id.u.id;
	connection_info.port_type = HV_PORT_TYPE_DOORBELL;
	connection_info.doorbell_connection_info.gpa = gpa;
	connection_info.doorbell_connection_info.trigger_value = val;
	connection_info.doorbell_connection_info.flags = flags;

	ret = hv_call_connect_port(hv_current_partition_id, port_id, partition_id,
				   connection_id, &connection_info, 0, NUMA_NO_NODE);
	if (ret < 0) {
		hv_call_delete_port(hv_current_partition_id, port_id);
		mshv_portid_free(port_id.u.id);
		return ret;
	}

	// lets use the port_id as the doorbell_id
	return port_id.u.id;
}
657 
658 void
mshv_unregister_doorbell(u64 partition_id,int doorbell_portid)659 mshv_unregister_doorbell(u64 partition_id, int doorbell_portid)
660 {
661 	union hv_port_id port_id = { 0 };
662 	union hv_connection_id connection_id = { 0 };
663 
664 	connection_id.u.id = doorbell_portid;
665 	hv_call_disconnect_port(partition_id, connection_id);
666 
667 	port_id.u.id = doorbell_portid;
668 	hv_call_delete_port(hv_current_partition_id, port_id);
669 
670 	mshv_portid_free(doorbell_portid);
671 }
672 
mshv_synic_reboot_notify(struct notifier_block * nb,unsigned long code,void * unused)673 static int mshv_synic_reboot_notify(struct notifier_block *nb,
674 			      unsigned long code, void *unused)
675 {
676 	if (!hv_root_partition())
677 		return 0;
678 
679 	cpuhp_remove_state(synic_cpuhp_online);
680 	return 0;
681 }
682 
/* Invokes SynIC teardown on reboot (root partition only; see above). */
static struct notifier_block mshv_synic_reboot_nb = {
	.notifier_call = mshv_synic_reboot_notify,
};
686 
#ifndef HYPERVISOR_CALLBACK_VECTOR
/* Per-CPU cookie required by request_percpu_irq()/free_percpu_irq(). */
static DEFINE_PER_CPU(long, mshv_evt);

/* Per-CPU SINT IRQ handler; all real work happens in mshv_isr(). */
static irqreturn_t mshv_percpu_isr(int irq, void *dev_id)
{
	mshv_isr();
	return IRQ_HANDLED;
}
695 
#ifdef CONFIG_ACPI
/*
 * Map the hypervisor-provided SINT interrupt ID to a Linux IRQ via the
 * ACPI GSI layer. Returns the Linux IRQ number, or negative errno.
 */
static int __init mshv_acpi_setup_sint_irq(void)
{
	return acpi_register_gsi(NULL, mshv_sint_vector, ACPI_EDGE_SENSITIVE,
					ACPI_ACTIVE_HIGH);
}

/* Release the GSI mapping created by mshv_acpi_setup_sint_irq(). */
static void mshv_acpi_cleanup_sint_irq(void)
{
	acpi_unregister_gsi(mshv_sint_vector);
}
#else
/* Without ACPI there is no way to map the SINT vector to an IRQ. */
static int __init mshv_acpi_setup_sint_irq(void)
{
	return -ENODEV;
}

static void mshv_acpi_cleanup_sint_irq(void)
{
}
#endif
717 
/*
 * Discover the SINT interrupt ID the hypervisor reserved for us (read
 * from the HV_ARM64_REGISTER_SINT_RESERVED_INTERRUPT_ID VP register),
 * map it to a Linux IRQ through ACPI, and install the per-CPU handler.
 *
 * On success returns 0 with mshv_sint_vector and mshv_sint_irq set;
 * otherwise returns a negative errno.
 */
static int __init mshv_sint_vector_setup(void)
{
	int ret;
	struct hv_register_assoc reg = {
		.name = HV_ARM64_REGISTER_SINT_RESERVED_INTERRUPT_ID,
	};
	union hv_input_vtl input_vtl = { 0 };

	if (acpi_disabled)
		return -ENODEV;

	/* A zero register value means no interrupt ID was reserved. */
	ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
				1, input_vtl, &reg);
	if (ret || !reg.value.reg64)
		return -ENODEV;

	mshv_sint_vector = reg.value.reg64;
	ret = mshv_acpi_setup_sint_irq();
	if (ret < 0) {
		pr_err("Failed to setup IRQ for MSHV SINT vector %d: %d\n",
			mshv_sint_vector, ret);
		goto out_fail;
	}

	mshv_sint_irq = ret;

	ret = request_percpu_irq(mshv_sint_irq, mshv_percpu_isr, "MSHV",
		&mshv_evt);
	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	mshv_acpi_cleanup_sint_irq();
out_fail:
	return ret;
}
756 
/* Undo mshv_sint_vector_setup(): free the per-CPU IRQ and the GSI mapping. */
static void mshv_sint_vector_cleanup(void)
{
	free_percpu_irq(mshv_sint_irq, &mshv_evt);
	mshv_acpi_cleanup_sint_irq();
}
#else /* !HYPERVISOR_CALLBACK_VECTOR */
/* The arch provides a dedicated callback vector; no IRQ plumbing needed. */
static int __init mshv_sint_vector_setup(void)
{
	mshv_sint_vector = HYPERVISOR_CALLBACK_VECTOR;
	return 0;
}

static void mshv_sint_vector_cleanup(void)
{
}
#endif /* HYPERVISOR_CALLBACK_VECTOR */
773 
/*
 * Initialize the SynIC interrupt plumbing: set up the SINT vector/IRQ,
 * allocate the per-CPU page-tracking structures, bring each online CPU's
 * SynIC up via cpuhp, and register the reboot notifier for teardown.
 *
 * Returns 0 on success; on failure every earlier step is unwound.
 */
int __init mshv_synic_init(struct device *dev)
{
	int ret = 0;

	ret = mshv_sint_vector_setup();
	if (ret)
		return ret;

	synic_pages = alloc_percpu(struct hv_synic_pages);
	if (!synic_pages) {
		dev_err(dev, "Failed to allocate percpu synic page\n");
		ret = -ENOMEM;
		goto sint_vector_cleanup;
	}

	/* Runs mshv_synic_cpu_init() on every online CPU, now and on hotplug. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_synic",
				mshv_synic_cpu_init,
				mshv_synic_cpu_exit);
	if (ret < 0) {
		dev_err(dev, "Failed to setup cpu hotplug state: %i\n", ret);
		goto free_synic_pages;
	}

	/* Positive return from CPUHP_AP_ONLINE_DYN is the dynamic state id. */
	synic_cpuhp_online = ret;

	ret = register_reboot_notifier(&mshv_synic_reboot_nb);
	if (ret)
		goto remove_cpuhp_state;

	return 0;

remove_cpuhp_state:
	cpuhp_remove_state(synic_cpuhp_online);
free_synic_pages:
	free_percpu(synic_pages);
sint_vector_cleanup:
	mshv_sint_vector_cleanup();
	return ret;
}
813 
/*
 * Tear down everything mshv_synic_init() set up, in reverse order.
 * cpuhp_remove_state() invokes mshv_synic_cpu_exit() on each online CPU.
 */
void mshv_synic_exit(void)
{
	unregister_reboot_notifier(&mshv_synic_reboot_nb);
	cpuhp_remove_state(synic_cpuhp_online);
	free_percpu(synic_pages);
	mshv_sint_vector_cleanup();
}
821