1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2023, Microsoft Corporation.
4 *
5 * mshv_root module's main interrupt handler and associated functionality.
6 *
7 * Authors: Microsoft Linux virtualization team
8 */
9
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/mm.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/random.h>
16 #include <linux/cpuhotplug.h>
17 #include <linux/reboot.h>
18 #include <asm/mshyperv.h>
19 #include <linux/acpi.h>
20
21 #include "mshv_eventfd.h"
22 #include "mshv.h"
23
/* Dynamic cpuhp state slot returned by cpuhp_setup_state() */
static int synic_cpuhp_online;
/* Per-cpu SynIC page pointers (message / event-flags / event-ring pages) */
static struct hv_synic_pages __percpu *synic_pages;
static int mshv_sint_vector = -1; /* hwirq for the SynIC SINTs */
static int mshv_sint_irq = -1; /* Linux IRQ for mshv_sint_vector */
28
/*
 * Pop the next queued port ID from this CPU's SynIC event ring for the
 * given SINT. Returns the port ID, or 0 when the ring is empty or the
 * per-cpu ring state is not (yet) set up.
 *
 * Uses this_cpu_ptr() state, so it must run on the CPU that owns the ring
 * (it is called from the SynIC interrupt handler on that CPU).
 */
static u32 synic_event_ring_get_queued_port(u32 sint_index)
{
	struct hv_synic_event_ring_page **event_ring_page;
	volatile struct hv_synic_event_ring *ring;
	struct hv_synic_pages *spages;
	u8 **synic_eventring_tail;
	u32 message;
	u8 tail;

	spages = this_cpu_ptr(synic_pages);
	event_ring_page = &spages->synic_event_ring_page;
	synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);

	if (unlikely(!*synic_eventring_tail)) {
		pr_debug("Missing synic event ring tail!\n");
		return 0;
	}
	tail = (*synic_eventring_tail)[sint_index];

	if (unlikely(!*event_ring_page)) {
		pr_debug("Missing synic event ring page!\n");
		return 0;
	}

	ring = &(*event_ring_page)->sint_event_ring[sint_index];

	/*
	 * Get the message.
	 */
	message = ring->data[tail];

	if (!message) {
		if (ring->ring_full) {
			/*
			 * Ring is marked full, but we would have consumed all
			 * the messages. Notify the hypervisor that ring is now
			 * empty and check again.
			 */
			ring->ring_full = 0;
			hv_call_notify_port_ring_empty(sint_index);
			message = ring->data[tail];
		}

		if (!message) {
			ring->signal_masked = 0;
			/*
			 * Unmask the signal and sync with hypervisor
			 * before one last check for any message.
			 */
			mb();
			message = ring->data[tail];

			/*
			 * Ok, lets bail out.
			 */
			if (!message)
				return 0;
		}

		/* A message did arrive after all; keep the signal masked. */
		ring->signal_masked = 1;
	}

	/*
	 * Clear the message in the ring buffer.
	 */
	ring->data[tail] = 0;

	/* Advance tail, wrapping at the end of the ring. */
	if (++tail == HV_SYNIC_EVENT_RING_MESSAGE_COUNT)
		tail = 0;

	(*synic_eventring_tail)[sint_index] = tail;

	return message;
}
103
104 static bool
mshv_doorbell_isr(struct hv_message * msg)105 mshv_doorbell_isr(struct hv_message *msg)
106 {
107 struct hv_notification_message_payload *notification;
108 u32 port;
109
110 if (msg->header.message_type != HVMSG_SYNIC_SINT_INTERCEPT)
111 return false;
112
113 notification = (struct hv_notification_message_payload *)msg->u.payload;
114 if (notification->sint_index != HV_SYNIC_DOORBELL_SINT_INDEX)
115 return false;
116
117 while ((port = synic_event_ring_get_queued_port(HV_SYNIC_DOORBELL_SINT_INDEX))) {
118 struct port_table_info ptinfo = { 0 };
119
120 if (mshv_portid_lookup(port, &ptinfo)) {
121 pr_debug("Failed to get port info from port_table!\n");
122 continue;
123 }
124
125 if (ptinfo.hv_port_type != HV_PORT_TYPE_DOORBELL) {
126 pr_debug("Not a doorbell port!, port: %d, port_type: %d\n",
127 port, ptinfo.hv_port_type);
128 continue;
129 }
130
131 /* Invoke the callback */
132 ptinfo.hv_port_doorbell.doorbell_cb(port,
133 ptinfo.hv_port_doorbell.data);
134 }
135
136 return true;
137 }
138
mshv_async_call_completion_isr(struct hv_message * msg)139 static bool mshv_async_call_completion_isr(struct hv_message *msg)
140 {
141 bool handled = false;
142 struct hv_async_completion_message_payload *async_msg;
143 struct mshv_partition *partition;
144 u64 partition_id;
145
146 if (msg->header.message_type != HVMSG_ASYNC_CALL_COMPLETION)
147 goto out;
148
149 async_msg =
150 (struct hv_async_completion_message_payload *)msg->u.payload;
151
152 partition_id = async_msg->partition_id;
153
154 /*
155 * Hold this lock for the rest of the isr, because the partition could
156 * be released anytime.
157 * e.g. the MSHV_RUN_VP thread could wake on another cpu; it could
158 * release the partition unless we hold this!
159 */
160 rcu_read_lock();
161
162 partition = mshv_partition_find(partition_id);
163
164 if (unlikely(!partition)) {
165 pr_debug("failed to find partition %llu\n", partition_id);
166 goto unlock_out;
167 }
168
169 partition->async_hypercall_status = async_msg->status;
170 complete(&partition->async_hypercall);
171
172 handled = true;
173
174 unlock_out:
175 rcu_read_unlock();
176 out:
177 return handled;
178 }
179
/*
 * Wake a VP thread parked on its suspend queue. The signaled count and
 * kicked_by_hv flag are updated before the wake-up so the woken thread
 * can see why it was kicked (ordering provided by wake_up()'s own
 * synchronization — NOTE(review): confirm no explicit barrier is needed).
 */
static void kick_vp(struct mshv_vp *vp)
{
	atomic64_inc(&vp->run.vp_signaled_count);
	/* Record the wakeup source before waking the vcpu thread. */
	vp->run.kicked_by_hv = 1;
	wake_up(&vp->run.vp_suspend_queue);
}
186
/*
 * Kick every VP named in a sparse-4K VP bitset scheduler message.
 *
 * In the sparse format, valid_bank_mask says which 64-bit banks are
 * present, and bank_contents stores only those banks, densely packed;
 * each set bit in a bank identifies one VP index within that bank.
 */
static void
handle_bitset_message(const struct hv_vp_signal_bitset_scheduler_message *msg)
{
	int bank_idx, vps_signaled = 0, bank_mask_size;
	struct mshv_partition *partition;
	const struct hv_vpset *vpset;
	const u64 *bank_contents;
	u64 partition_id = msg->partition_id;

	if (msg->vp_bitset.bitset.format != HV_GENERIC_SET_SPARSE_4K) {
		pr_debug("scheduler message format is not HV_GENERIC_SET_SPARSE_4K");
		return;
	}

	if (msg->vp_count == 0) {
		pr_debug("scheduler message with no VP specified");
		return;
	}

	/* RCU keeps the partition (and thus its VP array) alive below. */
	rcu_read_lock();

	partition = mshv_partition_find(partition_id);
	if (unlikely(!partition)) {
		pr_debug("failed to find partition %llu\n", partition_id);
		goto unlock_out;
	}

	vpset = &msg->vp_bitset.bitset;

	bank_idx = -1;
	bank_contents = vpset->bank_contents;
	bank_mask_size = sizeof(vpset->valid_bank_mask) * BITS_PER_BYTE;

	/* Outer loop: walk the set bits of valid_bank_mask. */
	while (true) {
		int vp_bank_idx = -1;
		int vp_bank_size = sizeof(*bank_contents) * BITS_PER_BYTE;
		int vp_index;

		bank_idx = find_next_bit((unsigned long *)&vpset->valid_bank_mask,
					 bank_mask_size, bank_idx + 1);
		if (bank_idx == bank_mask_size)
			break;

		/* Inner loop: walk the set bits of this bank. */
		while (true) {
			struct mshv_vp *vp;

			vp_bank_idx = find_next_bit((unsigned long *)bank_contents,
						    vp_bank_size, vp_bank_idx + 1);
			if (vp_bank_idx == vp_bank_size)
				break;

			vp_index = (bank_idx * vp_bank_size) + vp_bank_idx;

			/* This shouldn't happen, but just in case. */
			if (unlikely(vp_index >= MSHV_MAX_VPS)) {
				pr_debug("VP index %u out of bounds\n",
					 vp_index);
				goto unlock_out;
			}

			vp = partition->pt_vp_array[vp_index];
			if (unlikely(!vp)) {
				pr_debug("failed to find VP %u\n", vp_index);
				goto unlock_out;
			}

			kick_vp(vp);
			vps_signaled++;
		}

		/* Banks are stored densely: advance once per valid bank. */
		bank_contents++;
	}

unlock_out:
	rcu_read_unlock();

	if (vps_signaled != msg->vp_count)
		pr_debug("asked to signal %u VPs but only did %u\n",
			 msg->vp_count, vps_signaled);
}
267
268 static void
handle_pair_message(const struct hv_vp_signal_pair_scheduler_message * msg)269 handle_pair_message(const struct hv_vp_signal_pair_scheduler_message *msg)
270 {
271 struct mshv_partition *partition = NULL;
272 struct mshv_vp *vp;
273 int idx;
274
275 rcu_read_lock();
276
277 for (idx = 0; idx < msg->vp_count; idx++) {
278 u64 partition_id = msg->partition_ids[idx];
279 u32 vp_index = msg->vp_indexes[idx];
280
281 if (idx == 0 || partition->pt_id != partition_id) {
282 partition = mshv_partition_find(partition_id);
283 if (unlikely(!partition)) {
284 pr_debug("failed to find partition %llu\n",
285 partition_id);
286 break;
287 }
288 }
289
290 /* This shouldn't happen, but just in case. */
291 if (unlikely(vp_index >= MSHV_MAX_VPS)) {
292 pr_debug("VP index %u out of bounds\n", vp_index);
293 break;
294 }
295
296 vp = partition->pt_vp_array[vp_index];
297 if (!vp) {
298 pr_debug("failed to find VP %u\n", vp_index);
299 break;
300 }
301
302 kick_vp(vp);
303 }
304
305 rcu_read_unlock();
306 }
307
308 static bool
mshv_scheduler_isr(struct hv_message * msg)309 mshv_scheduler_isr(struct hv_message *msg)
310 {
311 if (msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_BITSET &&
312 msg->header.message_type != HVMSG_SCHEDULER_VP_SIGNAL_PAIR)
313 return false;
314
315 if (msg->header.message_type == HVMSG_SCHEDULER_VP_SIGNAL_BITSET)
316 handle_bitset_message((struct hv_vp_signal_bitset_scheduler_message *)
317 msg->u.payload);
318 else
319 handle_pair_message((struct hv_vp_signal_pair_scheduler_message *)
320 msg->u.payload);
321
322 return true;
323 }
324
/*
 * Handle a message addressed to a specific partition (the header's
 * sender field carries the partition ID): APIC EOI notifications and
 * opaque intercept messages. Returns true if the message was consumed.
 */
static bool
mshv_intercept_isr(struct hv_message *msg)
{
	struct mshv_partition *partition;
	bool handled = false;
	struct mshv_vp *vp;
	u64 partition_id;
	u32 vp_index;

	partition_id = msg->header.sender;

	/* RCU pins the partition for the duration of the handler. */
	rcu_read_lock();

	partition = mshv_partition_find(partition_id);
	if (unlikely(!partition)) {
		pr_debug("failed to find partition %llu\n",
			 partition_id);
		goto unlock_out;
	}

	if (msg->header.message_type == HVMSG_X64_APIC_EOI) {
		/*
		 * Check if this gsi is registered in the
		 * ack_notifier list and invoke the callback
		 * if registered.
		 */

		/*
		 * If there is a notifier, the ack callback is supposed
		 * to handle the VMEXIT. So we need not pass this message
		 * to vcpu thread.
		 */
		struct hv_x64_apic_eoi_message *eoi_msg =
			(struct hv_x64_apic_eoi_message *)&msg->u.payload[0];

		if (mshv_notify_acked_gsi(partition, eoi_msg->interrupt_vector)) {
			handled = true;
			goto unlock_out;
		}
	}

	/*
	 * We should get an opaque intercept message here for all intercept
	 * messages, since we're using the mapped VP intercept message page.
	 *
	 * The intercept message will have been placed in intercept message
	 * page at this point.
	 *
	 * Make sure the message type matches our expectation.
	 */
	if (msg->header.message_type != HVMSG_OPAQUE_INTERCEPT) {
		pr_debug("wrong message type %d", msg->header.message_type);
		goto unlock_out;
	}

	/*
	 * Since we directly index the vp, and it has to exist for us to be here
	 * (because the vp is only deleted when the partition is), no additional
	 * locking is needed here
	 */
	vp_index =
		((struct hv_opaque_intercept_message *)msg->u.payload)->vp_index;
	vp = partition->pt_vp_array[vp_index];
	if (unlikely(!vp)) {
		pr_debug("failed to find VP %u\n", vp_index);
		goto unlock_out;
	}

	kick_vp(vp);

	handled = true;

unlock_out:
	rcu_read_unlock();

	return handled;
}
402
/*
 * Main SynIC interrupt handler: consume the message in this CPU's
 * interception-SINT slot and dispatch it to the doorbell, scheduler,
 * async-completion and intercept handlers in turn. On success, clears
 * the slot and writes EOM if another message is pending so the
 * hypervisor delivers it.
 */
void mshv_isr(void)
{
	struct hv_synic_pages *spages = this_cpu_ptr(synic_pages);
	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
	struct hv_message *msg;
	bool handled;

	if (unlikely(!(*msg_page))) {
		pr_debug("Missing synic page!\n");
		return;
	}

	msg = &((*msg_page)->sint_message[HV_SYNIC_INTERCEPTION_SINT_INDEX]);

	/*
	 * If the type isn't set, there isn't really a message;
	 * it may be some other hyperv interrupt
	 */
	if (msg->header.message_type == HVMSG_NONE)
		return;

	/* Try each handler until one claims the message. */
	handled = mshv_doorbell_isr(msg);

	if (!handled)
		handled = mshv_scheduler_isr(msg);

	if (!handled)
		handled = mshv_async_call_completion_isr(msg);

	if (!handled)
		handled = mshv_intercept_isr(msg);

	if (handled) {
		/*
		 * Acknowledge message with hypervisor if another message is
		 * pending.
		 */
		msg->header.message_type = HVMSG_NONE;
		/*
		 * Ensure the write is complete so the hypervisor will deliver
		 * the next message if available.
		 */
		mb();
		if (msg->header.message_flags.msg_pending)
			hv_set_non_nested_msr(HV_MSR_EOM, 0);

		add_interrupt_randomness(mshv_sint_vector);
	} else {
		pr_warn_once("%s: unknown message type 0x%x\n", __func__,
			     msg->header.message_type);
	}
}
455
/*
 * cpuhp online callback: map and enable this CPU's SynIC pages (message,
 * event flags, event ring), enable the per-cpu IRQ if one is in use,
 * unmask the interception and doorbell SINTs, and finally set the global
 * SynIC enable bit.
 *
 * On a mapping failure, pages already set up are disabled and unmapped.
 * Returns 0 on success, -EFAULT on any memremap() failure.
 */
static int mshv_synic_cpu_init(unsigned int cpu)
{
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sirbp sirbp;
	union hv_synic_sint sint;
	union hv_synic_scontrol sctrl;
	struct hv_synic_pages *spages = this_cpu_ptr(synic_pages);
	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
	struct hv_synic_event_flags_page **event_flags_page =
		&spages->synic_event_flags_page;
	struct hv_synic_event_ring_page **event_ring_page =
		&spages->synic_event_ring_page;

	/* Setup the Synic's message page */
	simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
	simp.simp_enabled = true;
	*msg_page = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
			     HV_HYP_PAGE_SIZE,
			     MEMREMAP_WB);

	if (!(*msg_page))
		return -EFAULT;

	/* Only enable in the MSR after the mapping succeeded. */
	hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event flags page */
	siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = true;
	*event_flags_page = memremap(siefp.base_siefp_gpa << PAGE_SHIFT,
				     PAGE_SIZE, MEMREMAP_WB);

	if (!(*event_flags_page))
		goto cleanup;

	hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);

	/* Setup the Synic's event ring page */
	sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
	sirbp.sirbp_enabled = true;
	*event_ring_page = memremap(sirbp.base_sirbp_gpa << PAGE_SHIFT,
				    PAGE_SIZE, MEMREMAP_WB);

	if (!(*event_ring_page))
		goto cleanup;

	hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);

	if (mshv_sint_irq != -1)
		enable_percpu_irq(mshv_sint_irq, 0);

	/* Enable intercepts */
	sint.as_uint64 = 0;
	sint.vector = mshv_sint_vector;
	sint.masked = false;
	sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
			      sint.as_uint64);

	/* Doorbell SINT */
	sint.as_uint64 = 0;
	sint.vector = mshv_sint_vector;
	sint.masked = false;
	sint.as_intercept = 1;
	sint.auto_eoi = hv_recommend_using_aeoi();
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
			      sint.as_uint64);

	/* Enable global synic bit */
	sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
	sctrl.enable = 1;
	hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);

	return 0;

cleanup:
	/* Unwind only what was set up; unmapped page pointers are NULL. */
	if (*event_ring_page) {
		sirbp.sirbp_enabled = false;
		hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
		memunmap(*event_ring_page);
	}
	if (*event_flags_page) {
		siefp.siefp_enabled = false;
		hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
		memunmap(*event_flags_page);
	}
	if (*msg_page) {
		simp.simp_enabled = false;
		hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
		memunmap(*msg_page);
	}

	return -EFAULT;
}
550
/*
 * cpuhp offline callback: mask both SINTs, disable the per-cpu IRQ,
 * then disable and unmap the event ring, event flags and message pages
 * in reverse order of mshv_synic_cpu_init(), and clear the global SynIC
 * enable bit. Always returns 0.
 */
static int mshv_synic_cpu_exit(unsigned int cpu)
{
	union hv_synic_sint sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sirbp sirbp;
	union hv_synic_scontrol sctrl;
	struct hv_synic_pages *spages = this_cpu_ptr(synic_pages);
	struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
	struct hv_synic_event_flags_page **event_flags_page =
		&spages->synic_event_flags_page;
	struct hv_synic_event_ring_page **event_ring_page =
		&spages->synic_event_ring_page;

	/* Disable the interrupt */
	sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX);
	sint.masked = true;
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
			      sint.as_uint64);

	/* Disable Doorbell SINT */
	sint.as_uint64 = hv_get_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX);
	sint.masked = true;
	hv_set_non_nested_msr(HV_MSR_SINT0 + HV_SYNIC_DOORBELL_SINT_INDEX,
			      sint.as_uint64);

	if (mshv_sint_irq != -1)
		disable_percpu_irq(mshv_sint_irq);

	/* Disable Synic's event ring page */
	sirbp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIRBP);
	sirbp.sirbp_enabled = false;
	hv_set_non_nested_msr(HV_MSR_SIRBP, sirbp.as_uint64);
	memunmap(*event_ring_page);

	/* Disable Synic's event flags page */
	siefp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIEFP);
	siefp.siefp_enabled = false;
	hv_set_non_nested_msr(HV_MSR_SIEFP, siefp.as_uint64);
	memunmap(*event_flags_page);

	/* Disable Synic's message page */
	simp.as_uint64 = hv_get_non_nested_msr(HV_MSR_SIMP);
	simp.simp_enabled = false;
	hv_set_non_nested_msr(HV_MSR_SIMP, simp.as_uint64);
	memunmap(*msg_page);

	/* Disable global synic bit */
	sctrl.as_uint64 = hv_get_non_nested_msr(HV_MSR_SCONTROL);
	sctrl.enable = 0;
	hv_set_non_nested_msr(HV_MSR_SCONTROL, sctrl.as_uint64);

	return 0;
}
605
/*
 * Create and connect a hypervisor doorbell port for @partition_id.
 * @doorbell_cb is invoked (with @data) from mshv_doorbell_isr() when the
 * doorbell fires for guest physical address @gpa with trigger value
 * @val, subject to @flags.
 *
 * Returns the allocated port/doorbell ID (>= 0) on success, or a
 * negative errno on failure.
 */
int
mshv_register_doorbell(u64 partition_id, doorbell_cb_t doorbell_cb, void *data,
		       u64 gpa, u64 val, u64 flags)
{
	struct hv_connection_info connection_info = { 0 };
	union hv_connection_id connection_id = { 0 };
	struct port_table_info *port_table_info;
	struct hv_port_info port_info = { 0 };
	union hv_port_id port_id = { 0 };
	int ret;

	port_table_info = kmalloc_obj(*port_table_info);
	if (!port_table_info)
		return -ENOMEM;

	port_table_info->hv_port_type = HV_PORT_TYPE_DOORBELL;
	port_table_info->hv_port_doorbell.doorbell_cb = doorbell_cb;
	port_table_info->hv_port_doorbell.data = data;
	/*
	 * On success the port table takes ownership of port_table_info
	 * (later error paths free it via mshv_portid_free(), not kfree()).
	 */
	ret = mshv_portid_alloc(port_table_info);
	if (ret < 0) {
		kfree(port_table_info);
		return ret;
	}

	port_id.u.id = ret;
	port_info.port_type = HV_PORT_TYPE_DOORBELL;
	port_info.doorbell_port_info.target_sint = HV_SYNIC_DOORBELL_SINT_INDEX;
	port_info.doorbell_port_info.target_vp = HV_ANY_VP;
	ret = hv_call_create_port(hv_current_partition_id, port_id, partition_id,
				  &port_info,
				  0, 0, NUMA_NO_NODE);

	if (ret < 0) {
		mshv_portid_free(port_id.u.id);
		return ret;
	}

	connection_id.u.id = port_id.u.id;
	connection_info.port_type = HV_PORT_TYPE_DOORBELL;
	connection_info.doorbell_connection_info.gpa = gpa;
	connection_info.doorbell_connection_info.trigger_value = val;
	connection_info.doorbell_connection_info.flags = flags;

	ret = hv_call_connect_port(hv_current_partition_id, port_id, partition_id,
				   connection_id, &connection_info, 0, NUMA_NO_NODE);
	if (ret < 0) {
		hv_call_delete_port(hv_current_partition_id, port_id);
		mshv_portid_free(port_id.u.id);
		return ret;
	}

	// lets use the port_id as the doorbell_id
	return port_id.u.id;
}
660
661 void
mshv_unregister_doorbell(u64 partition_id,int doorbell_portid)662 mshv_unregister_doorbell(u64 partition_id, int doorbell_portid)
663 {
664 union hv_port_id port_id = { 0 };
665 union hv_connection_id connection_id = { 0 };
666
667 connection_id.u.id = doorbell_portid;
668 hv_call_disconnect_port(partition_id, connection_id);
669
670 port_id.u.id = doorbell_portid;
671 hv_call_delete_port(hv_current_partition_id, port_id);
672
673 mshv_portid_free(doorbell_portid);
674 }
675
mshv_synic_reboot_notify(struct notifier_block * nb,unsigned long code,void * unused)676 static int mshv_synic_reboot_notify(struct notifier_block *nb,
677 unsigned long code, void *unused)
678 {
679 if (!hv_root_partition())
680 return 0;
681
682 cpuhp_remove_state(synic_cpuhp_online);
683 return 0;
684 }
685
686 static struct notifier_block mshv_synic_reboot_nb = {
687 .notifier_call = mshv_synic_reboot_notify,
688 };
689
690 #ifndef HYPERVISOR_CALLBACK_VECTOR
/* Per-cpu cookie passed to request_percpu_irq()/free_percpu_irq(). */
static DEFINE_PER_CPU(long, mshv_evt);

/* Per-cpu IRQ handler: delegate to the common SynIC handler. */
static irqreturn_t mshv_percpu_isr(int irq, void *dev_id)
{
	mshv_isr();
	return IRQ_HANDLED;
}
698
#ifdef CONFIG_ACPI
/*
 * Map the SINT hwirq to a Linux IRQ through ACPI GSI registration.
 * Returns the Linux IRQ number, or a negative value on failure.
 */
static int __init mshv_acpi_setup_sint_irq(void)
{
	return acpi_register_gsi(NULL, mshv_sint_vector, ACPI_EDGE_SENSITIVE,
				 ACPI_ACTIVE_HIGH);
}

/* Undo mshv_acpi_setup_sint_irq(). */
static void mshv_acpi_cleanup_sint_irq(void)
{
	acpi_unregister_gsi(mshv_sint_vector);
}
#else
/* Without ACPI there is no way to map the SINT vector to an IRQ. */
static int __init mshv_acpi_setup_sint_irq(void)
{
	return -ENODEV;
}

static void mshv_acpi_cleanup_sint_irq(void)
{
}
#endif
720
/*
 * Query the hypervisor-reserved SINT interrupt ID, map it to a Linux
 * IRQ via ACPI, and install the per-cpu handler. Returns 0 on success
 * or a negative errno (everything registered so far is unwound).
 */
static int __init mshv_sint_vector_setup(void)
{
	int ret;
	struct hv_register_assoc reg = {
		.name = HV_ARM64_REGISTER_SINT_RESERVED_INTERRUPT_ID,
	};
	union hv_input_vtl input_vtl = { 0 };

	if (acpi_disabled)
		return -ENODEV;

	ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
				       1, input_vtl, &reg);
	/* A zero register value means no reserved interrupt ID is available. */
	if (ret || !reg.value.reg64)
		return -ENODEV;

	mshv_sint_vector = reg.value.reg64;
	ret = mshv_acpi_setup_sint_irq();
	if (ret < 0) {
		pr_err("Failed to setup IRQ for MSHV SINT vector %d: %d\n",
		       mshv_sint_vector, ret);
		goto out_fail;
	}

	mshv_sint_irq = ret;

	ret = request_percpu_irq(mshv_sint_irq, mshv_percpu_isr, "MSHV",
				 &mshv_evt);
	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	mshv_acpi_cleanup_sint_irq();
out_fail:
	return ret;
}
759
/* Undo mshv_sint_vector_setup(): release the per-cpu IRQ and ACPI GSI. */
static void mshv_sint_vector_cleanup(void)
{
	free_percpu_irq(mshv_sint_irq, &mshv_evt);
	mshv_acpi_cleanup_sint_irq();
}
#else /* !HYPERVISOR_CALLBACK_VECTOR */
/*
 * With a dedicated hypervisor callback vector, SINT delivery uses the
 * architecture's callback path; no IRQ setup or teardown is needed.
 */
static int __init mshv_sint_vector_setup(void)
{
	mshv_sint_vector = HYPERVISOR_CALLBACK_VECTOR;
	return 0;
}

static void mshv_sint_vector_cleanup(void)
{
}
#endif /* HYPERVISOR_CALLBACK_VECTOR */
776
/*
 * SynIC module init: set up the SINT vector/IRQ, allocate the per-cpu
 * page bookkeeping, install the cpuhp callbacks that enable the SynIC
 * on each online CPU, and register the reboot notifier. Returns 0 on
 * success or a negative errno with everything unwound.
 */
int __init mshv_synic_init(struct device *dev)
{
	int ret = 0;

	ret = mshv_sint_vector_setup();
	if (ret)
		return ret;

	synic_pages = alloc_percpu(struct hv_synic_pages);
	if (!synic_pages) {
		dev_err(dev, "Failed to allocate percpu synic page\n");
		ret = -ENOMEM;
		goto sint_vector_cleanup;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mshv_synic",
				mshv_synic_cpu_init,
				mshv_synic_cpu_exit);
	if (ret < 0) {
		dev_err(dev, "Failed to setup cpu hotplug state: %i\n", ret);
		goto free_synic_pages;
	}

	/* Dynamic cpuhp slot; saved for cpuhp_remove_state() at teardown. */
	synic_cpuhp_online = ret;

	ret = register_reboot_notifier(&mshv_synic_reboot_nb);
	if (ret)
		goto remove_cpuhp_state;

	return 0;

remove_cpuhp_state:
	cpuhp_remove_state(synic_cpuhp_online);
free_synic_pages:
	free_percpu(synic_pages);
sint_vector_cleanup:
	mshv_sint_vector_cleanup();
	return ret;
}
816
/* Module teardown: undo mshv_synic_init() in reverse order. */
void mshv_synic_exit(void)
{
	unregister_reboot_notifier(&mshv_synic_reboot_nb);
	cpuhp_remove_state(synic_cpuhp_online);
	free_percpu(synic_pages);
	mshv_sint_vector_cleanup();
}
824