// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"

struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

/* VMBus Root Device */
static struct device *vmbus_root_device;

static int hyperv_cpuhp_online;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * The panic notifier below is responsible solely for unloading the
 * vmbus connection, which is necessary in a panic event.
 *
 * Note that an intricate relation exists between this notifier and the
 * Hyper-V framebuffer panic notifier: the vmbus connection must still be
 * alive for the latter to succeed, so the two are ordered with respect to
 * each other [see hvfb_on_panic()] using the notifiers' priorities.
 */
static int hv_panic_vmbus_unload(struct notifier_block *nb, unsigned long val,
				 void *args)
{
	vmbus_initiate_unload(true);
	return NOTIFY_DONE;
}
static struct notifier_block hyperv_panic_vmbus_unload_block = {
	.notifier_call	= hv_panic_vmbus_unload,
	.priority	= INT_MIN + 1, /* almost the latest one to execute */
};

static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

struct device *hv_get_vmbus_root_device(void)
{
	return vmbus_root_device;
}
EXPORT_SYMBOL_GPL(hv_get_vmbus_root_device);

static int vmbus_exists(void)
{
	if (vmbus_root_device == NULL)
		return -ENODEV;

	return 0;
}

static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
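
/*
 * Worked example for the two helpers above: a channel whose
 * offermsg.monitorid is 70 maps to monitor group 70 / 32 == 2 (i.e.
 * trigger_group[2] in the monitor page) and to bit/array offset
 * 70 % 32 == 6 within that group.
 */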

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}

static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "{%pUl}\n",
			  &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "{%pUl}\n",
			  &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
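
/*
 * For illustration: the emitted modalias is "vmbus:" followed by the 32
 * hex characters of the 16 dev_type GUID bytes as they appear in memory
 * (guid_t stores its leading fields little-endian, so the string is not
 * simply the canonical GUID text with the dashes removed). Userspace
 * module loaders match this against the vmbus:* aliases generated by
 * MODULE_DEVICE_TABLE(vmbus, ...) in each driver module.
 */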

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif

static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
			  vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
			  vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
			  vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
			  vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
			  vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
			  vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);

static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sysfs_emit(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sysfs_emit(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);

static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int n_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	n_written = sysfs_emit(buf, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
					   cur_sc->offermsg.child_relid,
					   cur_sc->target_cpu);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return n_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
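
/*
 * Sample (hypothetical) channel_vp_mapping output for a device whose
 * primary channel has relid 14 on CPU 0 and one subchannel with relid 15
 * on CPU 1 -- one "relid:target_cpu" pair per line:
 *
 *	14:0
 *	15:1
 */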

static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	int ret;

	ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
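
/*
 * Usage sketch: driver_override follows the standard driver core
 * convention (see driver_set_override()). For a hypothetical device path,
 * forcing the device to bind to uio_hv_generic would look like:
 *
 *	echo uio_hv_generic > \
 *	  /sys/bus/vmbus/devices/<device GUID>/driver_override
 *	echo <device GUID> > /sys/bus/vmbus/drivers/<current driver>/unbind
 *	echo <device GUID> > /sys/bus/vmbus/drivers/uio_hv_generic/bind
 *
 * Writing an empty string clears the override again.
 */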

/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);

/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(const struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);
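
/*
 * For illustration, from userspace:
 *
 *	$ cat /sys/bus/vmbus/hibernation
 *	1
 *
 * prints 1 when the VM exposes the Hyper-V hibernation capability and 0
 * otherwise.
 */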

static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);

/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent for udev in userspace. Udev then matches its rules
 * against the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid, where guid is the string
 * representation of the device guid (each byte of the guid is represented
 * with two hex characters).
 */
static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
{
	const struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}

static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(const struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match((struct hv_driver *)drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}

/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices
 *
 * This function can race with vmbus_device_register(). This function is
 * typically running on a user thread in response to writing to the "new_id"
 * sysfs entry for a driver. vmbus_device_register() is running on a
 * workqueue thread in response to the Hyper-V host offering a device to the
 * guest. This function calls driver_attach(), which looks for an existing
 * device matching the new id, and attaches the driver to which the new id
 * has been assigned. vmbus_device_register() calls device_register(), which
 * looks for a driver that matches the device being registered. If both
 * operations are running simultaneously, the device driver probe function runs
 * on whichever thread establishes the linkage between the driver and device.
 *
 * In most cases, it doesn't matter which thread runs the driver probe
 * function. But if vmbus_device_register() does not find a matching driver,
 * it proceeds to create the "channels" subdirectory and numbered per-channel
 * subdirectory in sysfs. While that multi-step creation is in progress, this
 * function could run the driver probe function. If the probe function checks
 * for, or operates on, entries in the "channels" subdirectory, including by
 * calling hv_create_ring_sysfs(), the operation may or may not succeed
 * depending on the race. The race can't create a kernel failure in VMBus
 * or device subsystem code, but probe functions in VMBus drivers doing such
 * operations must be prepared for the failure case.
 */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}
/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
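
/*
 * Usage sketch (the GUID below is the VMBus synthetic network class ID,
 * used purely as an example):
 *
 *	echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" > \
 *	  /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 *
 * guid_parse() accepts the canonical 8-4-4-4-12 format; a successful write
 * immediately re-probes unbound devices via driver_attach().
 */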

/*
 * remove_id_store - remove a device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);


/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, const struct device_driver *driver)
{
	const struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}
/*
 * vmbus_probe - Add the new vmbus child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);

	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}

/*
 * vmbus_dma_configure -- Configure DMA coherence for VMbus device
 */
static int vmbus_dma_configure(struct device *child_device)
{
	/*
	 * On ARM64, propagate the DMA coherence setting from the top level
	 * VMbus ACPI device to the child VMbus device being added here.
	 * On x86/x64 coherence is assumed and these calls have no effect.
	 */
	hv_setup_dma_ops(child_device,
			 device_get_dma_attr(vmbus_root_device) == DEV_DMA_COHERENT);
	return 0;
}

/*
 * vmbus_remove - Remove a vmbus device
 */
static void vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);


	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}

#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */

/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */

static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_suspend,
	.thaw_noirq	= vmbus_resume,
	.poweroff_noirq	= vmbus_suspend,
	.restore_noirq	= vmbus_resume,
};

/* The one and only one */
static const struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dma_configure =	vmbus_dma_configure,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
	.bus_groups =		vmbus_bus_groups,
	.pm =			&vmbus_pm,
};

struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}

void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	enum vmbus_channel_message_type msgtype;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	__u8 payload_size;
	u32 message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	/*
	 * Since the message is in memory shared with the host, an erroneous or
	 * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
	 * or individual message handlers are executing; to prevent this, copy
	 * the message into private memory.
	 */
	memcpy(&msg_copy, msg, sizeof(struct hv_message));

	message_type = msg_copy.header.message_type;
	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
	msgtype = hdr->msgtype;

	trace_vmbus_on_msg_dpc(hdr);

	if (msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
		goto msg_handled;
	}

	payload_size = msg_copy.header.payload_size;
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		ctx->msg.header = msg_copy.header;
		memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex. See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message,
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever, if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler cannot start to
			 * run before the OFFER handler finishes.
			 */
			if (vmbus_connection.ignore_any_offer_msg)
				break;
			queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel. These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			if (vmbus_connection.ignore_any_offer_msg)
				break;
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}

#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections cannot persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	/*
	 * The event page can be directly checked to get the id of
	 * the channel that has the interrupt pending.
	 */
	void *page_addr = hv_cpu->synic_event_page;
	union hv_synic_event_flags *event
		= (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;

	maxbits = HV_EVENT_FLAGS_COUNT;
	recv_int_page = event->flags;

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}

static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr;
	struct hv_message *msg;

	vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(vmbus_interrupt);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
	vmbus_isr();
	return IRQ_HANDLED;
}

static void vmbus_percpu_work(struct work_struct *work)
{
	unsigned int cpu = smp_processor_id();

	hv_synic_init(cpu);
}

/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret, cpu;
	struct work_struct __percpu *works;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	/*
	 * VMbus interrupts are best modeled as per-cpu interrupts. If
	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
	 * If not on such an architecture (e.g., x86/x64), then rely on
	 * code in the arch-specific portion of the code tree to connect
	 * the VMbus interrupt handler.
	 */

	if (vmbus_irq == -1) {
		hv_setup_vmbus_handler(vmbus_isr);
	} else {
		vmbus_evt = alloc_percpu(long);
		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
					 "Hyper-V VMbus", vmbus_evt);
		if (ret) {
			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d\n",
			       vmbus_irq, ret);
			free_percpu(vmbus_evt);
			goto err_setup;
		}
	}

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	works = alloc_percpu(struct work_struct);
	if (!works) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, vmbus_percpu_work);
		schedule_work_on(cpu, work);
	}

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	/* Register the callbacks for possible CPU online/offline'ing */
	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
						   hv_synic_init, hv_synic_cleanup);
	cpus_read_unlock();
	free_percpu(works);
	if (ret < 0)
		goto err_alloc;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Always register the vmbus unload panic notifier because we
	 * need to shut the VMbus channel connection on panic.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_vmbus_unload_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
	hv_synic_free();
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
err_setup:
	bus_unregister(&hv_bus);
	return ret;
}

/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the driver
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the result of the 'driver_register()' call.
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);

/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to
 *		un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
1509
1510
1511 /*
1512 * Called when last reference to channel is gone.
1513 */
vmbus_chan_release(struct kobject * kobj)1514 static void vmbus_chan_release(struct kobject *kobj)
1515 {
1516 struct vmbus_channel *channel
1517 = container_of(kobj, struct vmbus_channel, kobj);
1518
1519 kfree_rcu(channel, rcu);
1520 }
1521
1522 struct vmbus_chan_attribute {
1523 struct attribute attr;
1524 ssize_t (*show)(struct vmbus_channel *chan, char *buf);
1525 ssize_t (*store)(struct vmbus_channel *chan,
1526 const char *buf, size_t count);
1527 };
1528 #define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
1529 struct vmbus_chan_attribute chan_attr_##_name \
1530 = __ATTR(_name, _mode, _show, _store)
1531 #define VMBUS_CHAN_ATTR_RW(_name) \
1532 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
1533 #define VMBUS_CHAN_ATTR_RO(_name) \
1534 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
1535 #define VMBUS_CHAN_ATTR_WO(_name) \
1536 struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
1537
vmbus_chan_attr_show(struct kobject * kobj,struct attribute * attr,char * buf)1538 static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
1539 struct attribute *attr, char *buf)
1540 {
1541 const struct vmbus_chan_attribute *attribute
1542 = container_of(attr, struct vmbus_chan_attribute, attr);
1543 struct vmbus_channel *chan
1544 = container_of(kobj, struct vmbus_channel, kobj);
1545
1546 if (!attribute->show)
1547 return -EIO;
1548
1549 return attribute->show(chan, buf);
1550 }
1551
vmbus_chan_attr_store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t count)1552 static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
1553 struct attribute *attr, const char *buf,
1554 size_t count)
1555 {
1556 const struct vmbus_chan_attribute *attribute
1557 = container_of(attr, struct vmbus_chan_attribute, attr);
1558 struct vmbus_channel *chan
1559 = container_of(kobj, struct vmbus_channel, kobj);
1560
1561 if (!attribute->store)
1562 return -EIO;
1563
1564 return attribute->store(chan, buf, count);
1565 }
1566
1567 static const struct sysfs_ops vmbus_chan_sysfs_ops = {
1568 .show = vmbus_chan_attr_show,
1569 .store = vmbus_chan_attr_store,
1570 };
1571
out_mask_show(struct vmbus_channel * channel,char * buf)1572 static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
1573 {
1574 struct hv_ring_buffer_info *rbi = &channel->outbound;
1575 ssize_t ret;
1576
1577 mutex_lock(&rbi->ring_buffer_mutex);
1578 if (!rbi->ring_buffer) {
1579 mutex_unlock(&rbi->ring_buffer_mutex);
1580 return -EINVAL;
1581 }
1582
1583 ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1584 mutex_unlock(&rbi->ring_buffer_mutex);
1585 return ret;
1586 }
1587 static VMBUS_CHAN_ATTR_RO(out_mask);
1588
in_mask_show(struct vmbus_channel * channel,char * buf)1589 static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
1590 {
1591 struct hv_ring_buffer_info *rbi = &channel->inbound;
1592 ssize_t ret;
1593
1594 mutex_lock(&rbi->ring_buffer_mutex);
1595 if (!rbi->ring_buffer) {
1596 mutex_unlock(&rbi->ring_buffer_mutex);
1597 return -EINVAL;
1598 }
1599
1600 ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
1601 mutex_unlock(&rbi->ring_buffer_mutex);
1602 return ret;
1603 }
1604 static VMBUS_CHAN_ATTR_RO(in_mask);
1605
read_avail_show(struct vmbus_channel * channel,char * buf)1606 static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
1607 {
1608 struct hv_ring_buffer_info *rbi = &channel->inbound;
1609 ssize_t ret;
1610
1611 mutex_lock(&rbi->ring_buffer_mutex);
1612 if (!rbi->ring_buffer) {
1613 mutex_unlock(&rbi->ring_buffer_mutex);
1614 return -EINVAL;
1615 }
1616
1617 ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
1618 mutex_unlock(&rbi->ring_buffer_mutex);
1619 return ret;
1620 }
1621 static VMBUS_CHAN_ATTR_RO(read_avail);
1622
write_avail_show(struct vmbus_channel * channel,char * buf)1623 static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
1624 {
1625 struct hv_ring_buffer_info *rbi = &channel->outbound;
1626 ssize_t ret;
1627
1628 mutex_lock(&rbi->ring_buffer_mutex);
1629 if (!rbi->ring_buffer) {
1630 mutex_unlock(&rbi->ring_buffer_mutex);
1631 return -EINVAL;
1632 }
1633
1634 ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
1635 mutex_unlock(&rbi->ring_buffer_mutex);
1636 return ret;
1637 }
1638 static VMBUS_CHAN_ATTR_RO(write_avail);
1639
target_cpu_show(struct vmbus_channel * channel,char * buf)1640 static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
1641 {
1642 return sprintf(buf, "%u\n", channel->target_cpu);
1643 }
1644
vmbus_channel_set_cpu(struct vmbus_channel * channel,u32 target_cpu)1645 int vmbus_channel_set_cpu(struct vmbus_channel *channel, u32 target_cpu)
1646 {
1647 u32 origin_cpu;
1648 int ret = 0;
1649
1650 lockdep_assert_cpus_held();
1651 lockdep_assert_held(&vmbus_connection.channel_mutex);
1652
1653 if (vmbus_proto_version < VERSION_WIN10_V4_1)
1654 return -EIO;
1655
1656 /* Validate target_cpu for the cpumask_test_cpu() operation below. */
1657 if (target_cpu >= nr_cpumask_bits)
1658 return -EINVAL;
1659
1660 if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
1661 return -EINVAL;
1662
1663 if (!cpu_online(target_cpu))
1664 return -EINVAL;
1665
1666 /*
1667 * Synchronizes vmbus_channel_set_cpu() and channel closure:
1668 *
1669 * { Initially: state = CHANNEL_OPENED }
1670 *
1671 * CPU1 CPU2
1672 *
1673 * [vmbus_channel_set_cpu()] [vmbus_disconnect_ring()]
1674 *
1675 * LOCK channel_mutex LOCK channel_mutex
1676 * LOAD r1 = state LOAD r2 = state
1677 * IF (r1 == CHANNEL_OPENED) IF (r2 == CHANNEL_OPENED)
1678 * SEND MODIFYCHANNEL STORE state = CHANNEL_OPEN
1679 * [...] SEND CLOSECHANNEL
1680 * UNLOCK channel_mutex UNLOCK channel_mutex
1681 *
1682 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
1683 * CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
1684 *
1685 * Note. The host processes the channel messages "sequentially", in
1686 * the order in which they are received on a per-partition basis.
1687 */
1688
1689 /*
1690 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
1691 * avoid sending the message and fail here for such channels.
1692 */
1693 if (channel->state != CHANNEL_OPENED_STATE) {
1694 ret = -EIO;
1695 goto end;
1696 }
1697
1698 origin_cpu = channel->target_cpu;
1699 if (target_cpu == origin_cpu)
1700 goto end;
1701
1702 if (vmbus_send_modifychannel(channel,
1703 hv_cpu_number_to_vp_number(target_cpu))) {
1704 ret = -EIO;
1705 goto end;
1706 }
1707
1708 /*
1709 * For version before VERSION_WIN10_V5_3, the following warning holds:
1710 *
1711 * Warning. At this point, there is *no* guarantee that the host will
1712 * have successfully processed the vmbus_send_modifychannel() request.
1713 * See the header comment of vmbus_send_modifychannel() for more info.
1714 *
1715 * Lags in the processing of the above vmbus_send_modifychannel() can
1716 * result in missed interrupts if the "old" target CPU is taken offline
1717 * before Hyper-V starts sending interrupts to the "new" target CPU.
1718 * But apart from this offlining scenario, the code tolerates such
1719 * lags. It will function correctly even if a channel interrupt comes
1720 * in on a CPU that is different from the channel target_cpu value.
1721 */
1722
1723 channel->target_cpu = target_cpu;
1724
1725 /* See init_vp_index(). */
1726 if (hv_is_perf_channel(channel))
1727 hv_update_allocated_cpus(origin_cpu, target_cpu);
1728
1729 /* Currently set only for storvsc channels. */
1730 if (channel->change_target_cpu_callback) {
1731 (*channel->change_target_cpu_callback)(channel,
1732 origin_cpu, target_cpu);
1733 }
1734
1735 end:
1736 return ret;
1737 }
1738
1739 static ssize_t target_cpu_store(struct vmbus_channel *channel,
1740 const char *buf, size_t count)
1741 {
1742 u32 target_cpu;
1743 ssize_t ret;
1744
1745 if (sscanf(buf, "%u", &target_cpu) != 1)
1746 return -EIO;
1747
1748 cpus_read_lock();
1749 mutex_lock(&vmbus_connection.channel_mutex);
1750 ret = vmbus_channel_set_cpu(channel, target_cpu);
1751 mutex_unlock(&vmbus_connection.channel_mutex);
1752 cpus_read_unlock();
1753
1754 return ret ?: count;
1755 }
1756 static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
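/*
 * Usage note (illustrative): the "cpu" attribute lets an administrator
 * retarget a channel's interrupts from user space, e.g.
 *
 *   echo 3 > /sys/bus/vmbus/devices/<device-guid>/channels/<relid>/cpu
 *
 * which lands in target_cpu_store() and then vmbus_channel_set_cpu()
 * above; <device-guid> and <relid> are placeholders for a real device
 * instance GUID and channel relid.
 */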
1757
1758 static ssize_t channel_pending_show(struct vmbus_channel *channel,
1759 char *buf)
1760 {
1761 return sprintf(buf, "%d\n",
1762 channel_pending(channel,
1763 vmbus_connection.monitor_pages[1]));
1764 }
1765 static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);
1766
1767 static ssize_t channel_latency_show(struct vmbus_channel *channel,
1768 char *buf)
1769 {
1770 return sprintf(buf, "%d\n",
1771 channel_latency(channel,
1772 vmbus_connection.monitor_pages[1]));
1773 }
1774 static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);
1775
1776 static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
1777 {
1778 return sprintf(buf, "%llu\n", channel->interrupts);
1779 }
1780 static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);
1781
1782 static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
1783 {
1784 return sprintf(buf, "%llu\n", channel->sig_events);
1785 }
1786 static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
1787
1788 static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
1789 char *buf)
1790 {
1791 return sprintf(buf, "%llu\n",
1792 (unsigned long long)channel->intr_in_full);
1793 }
1794 static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);
1795
1796 static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
1797 char *buf)
1798 {
1799 return sprintf(buf, "%llu\n",
1800 (unsigned long long)channel->intr_out_empty);
1801 }
1802 static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);
1803
1804 static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
1805 char *buf)
1806 {
1807 return sprintf(buf, "%llu\n",
1808 (unsigned long long)channel->out_full_first);
1809 }
1810 static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);
1811
1812 static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
1813 char *buf)
1814 {
1815 return sprintf(buf, "%llu\n",
1816 (unsigned long long)channel->out_full_total);
1817 }
1818 static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);
1819
1820 static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
1821 char *buf)
1822 {
1823 return sprintf(buf, "%u\n", channel->offermsg.monitorid);
1824 }
1825 static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);
1826
1827 static ssize_t subchannel_id_show(struct vmbus_channel *channel,
1828 char *buf)
1829 {
1830 return sprintf(buf, "%u\n",
1831 channel->offermsg.offer.sub_channel_index);
1832 }
1833 static VMBUS_CHAN_ATTR_RO(subchannel_id);
1834
1835 static int hv_mmap_ring_buffer_wrapper(struct file *filp, struct kobject *kobj,
1836 const struct bin_attribute *attr,
1837 struct vm_area_struct *vma)
1838 {
1839 struct vmbus_channel *channel = container_of(kobj, struct vmbus_channel, kobj);
1840
1841 /*
1842 * hv_(create|remove)_ring_sysfs implementation ensures that mmap_ring_buffer
1843 * is not NULL.
1844 */
1845 return channel->mmap_ring_buffer(channel, vma);
1846 }
1847
1848 static struct bin_attribute chan_attr_ring_buffer = {
1849 .attr = {
1850 .name = "ring",
1851 .mode = 0600,
1852 },
1853 .mmap = hv_mmap_ring_buffer_wrapper,
1854 };
1855 static struct attribute *vmbus_chan_attrs[] = {
1856 &chan_attr_out_mask.attr,
1857 &chan_attr_in_mask.attr,
1858 &chan_attr_read_avail.attr,
1859 &chan_attr_write_avail.attr,
1860 &chan_attr_cpu.attr,
1861 &chan_attr_pending.attr,
1862 &chan_attr_latency.attr,
1863 &chan_attr_interrupts.attr,
1864 &chan_attr_events.attr,
1865 &chan_attr_intr_in_full.attr,
1866 &chan_attr_intr_out_empty.attr,
1867 &chan_attr_out_full_first.attr,
1868 &chan_attr_out_full_total.attr,
1869 &chan_attr_monitor_id.attr,
1870 &chan_attr_subchannel_id.attr,
1871 NULL
1872 };
1873
1874 static const struct bin_attribute *vmbus_chan_bin_attrs[] = {
1875 &chan_attr_ring_buffer,
1876 NULL
1877 };
1878
1879 /*
1880 * Channel-level attribute_group callback function. Returns the permission for
1881 * each attribute, and returns 0 if an attribute is not visible.
1882 */
1883 static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
1884 struct attribute *attr, int idx)
1885 {
1886 const struct vmbus_channel *channel =
1887 container_of(kobj, struct vmbus_channel, kobj);
1888
1889 /* Hide the monitor attributes if the monitor mechanism is not used. */
1890 if (!channel->offermsg.monitor_allocated &&
1891 (attr == &chan_attr_pending.attr ||
1892 attr == &chan_attr_latency.attr ||
1893 attr == &chan_attr_monitor_id.attr))
1894 return 0;
1895
1896 return attr->mode;
1897 }
1898
1899 static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj,
1900 const struct bin_attribute *attr, int idx)
1901 {
1902 const struct vmbus_channel *channel =
1903 container_of(kobj, struct vmbus_channel, kobj);
1904
1905 /* Hide ring attribute if channel's ring_sysfs_visible is set to false */
1906 if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible)
1907 return 0;
1908
1909 return attr->attr.mode;
1910 }
1911
1912 static size_t vmbus_chan_bin_size(struct kobject *kobj,
1913 const struct bin_attribute *bin_attr, int a)
1914 {
1915 const struct vmbus_channel *channel =
1916 container_of(kobj, struct vmbus_channel, kobj);
1917
1918 return channel->ringbuffer_pagecount << PAGE_SHIFT;
1919 }
1920
1921 static const struct attribute_group vmbus_chan_group = {
1922 .attrs = vmbus_chan_attrs,
1923 .bin_attrs = vmbus_chan_bin_attrs,
1924 .is_visible = vmbus_chan_attr_is_visible,
1925 .is_bin_visible = vmbus_chan_bin_attr_is_visible,
1926 .bin_size = vmbus_chan_bin_size,
1927 };
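/*
 * Resulting per-channel sysfs layout (illustrative):
 *
 *   /sys/bus/vmbus/devices/<device-guid>/channels/<relid>/
 *       cpu  events  in_mask  interrupts  intr_in_full  intr_out_empty
 *       latency*  monitor_id*  out_full_first  out_full_total  out_mask
 *       pending*  read_avail  ring**  subchannel_id  write_avail
 *
 *   *  present only when the channel uses the monitor mechanism
 *   ** visible only while ring_sysfs_visible is set; see
 *      hv_create_ring_sysfs() below
 */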
1928
1929 static const struct kobj_type vmbus_chan_ktype = {
1930 .sysfs_ops = &vmbus_chan_sysfs_ops,
1931 .release = vmbus_chan_release,
1932 };
1933
1934 /**
1935 * hv_create_ring_sysfs() - create "ring" sysfs entry corresponding to ring buffers for a channel.
1936 * @channel: Pointer to vmbus_channel structure
1937 * @hv_mmap_ring_buffer: function pointer for initializing the function to be called on mmap of
1938 * channel's "ring" sysfs node, which is for the ring buffer of that channel.
1939 * Function pointer is of below type:
1940 * int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
1941 * struct vm_area_struct *vma))
1942 * This has a pointer to the channel and a pointer to vm_area_struct,
1943 * used for mmap, as arguments.
1944 *
1945 * The sysfs node for a channel's ring buffer is created along with the other attributes,
1946 * but its visibility is disabled by default; it should be enabled only while the use-case
1947 * that needs it is running.
1948 * For example, an HV_NIC device is used either by uio_hv_generic or by hv_netvsc at any
1949 * given point in time, and the "ring" sysfs node is needed only while uio_hv_generic is
1950 * bound to that device. To avoid exposing the ring buffer by default, this function is
1951 * responsible for enabling its visibility for userspace to use.
1952 * Note: Races with userspace are possible, and creating new use-cases for this function
1953 * is discouraged. It was added to maintain backward compatibility, while solving one of
1954 * the race conditions in uio_hv_generic during sysfs creation. See the comments with
1955 * vmbus_add_dynid() and vmbus_device_register().
1956 *
1957 * Returns 0 on success or error code on failure.
1958 */
1959 int hv_create_ring_sysfs(struct vmbus_channel *channel,
1960 int (*hv_mmap_ring_buffer)(struct vmbus_channel *channel,
1961 struct vm_area_struct *vma))
1962 {
1963 struct kobject *kobj = &channel->kobj;
1964
1965 channel->mmap_ring_buffer = hv_mmap_ring_buffer;
1966 channel->ring_sysfs_visible = true;
1967
1968 return sysfs_update_group(kobj, &vmbus_chan_group);
1969 }
1970 EXPORT_SYMBOL_GPL(hv_create_ring_sysfs);
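/*
 * Sketch of a caller (illustrative, compiled out): a UIO-style driver could
 * route mmap() of the "ring" node to the channel's ring buffer roughly as
 * below. The example_* names are hypothetical; compare hv_uio_ring_mmap()
 * in uio_hv_generic for an in-tree user.
 */
#if 0
static int example_ring_mmap(struct vmbus_channel *channel,
			     struct vm_area_struct *vma)
{
	void *ring_buffer = page_address(channel->ringbuffer_page);

	if (channel->state != CHANNEL_OPENED_STATE)
		return -ENODEV;

	return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
			       channel->ringbuffer_pagecount << PAGE_SHIFT);
}

static int example_bind(struct vmbus_channel *channel)
{
	/* Make <channel>/ring visible and route its mmap() to the handler. */
	return hv_create_ring_sysfs(channel, example_ring_mmap);
}

static void example_unbind(struct vmbus_channel *channel)
{
	/* Hide <channel>/ring again and drop the mmap handler. */
	hv_remove_ring_sysfs(channel);
}
#endif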
1971
1972 /**
1973 * hv_remove_ring_sysfs() - remove ring sysfs entry corresponding to ring buffers for a channel.
1974 * @channel: Pointer to vmbus_channel structure
1975 *
1976 * Hide "ring" sysfs for a channel by changing its is_visible attribute and updating sysfs group.
1977 *
1978 * Returns 0 on success or error code on failure.
1979 */
1980 int hv_remove_ring_sysfs(struct vmbus_channel *channel)
1981 {
1982 struct kobject *kobj = &channel->kobj;
1983 int ret;
1984
1985 channel->ring_sysfs_visible = false;
1986 ret = sysfs_update_group(kobj, &vmbus_chan_group);
1987 channel->mmap_ring_buffer = NULL;
1988 return ret;
1989 }
1990 EXPORT_SYMBOL_GPL(hv_remove_ring_sysfs);
1991
1992 /*
1993 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
1994 */
1995 int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
1996 {
1997 const struct device *device = &dev->device;
1998 struct kobject *kobj = &channel->kobj;
1999 u32 relid = channel->offermsg.child_relid;
2000 int ret;
2001
2002 kobj->kset = dev->channels_kset;
2003 ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
2004 "%u", relid);
2005 if (ret) {
2006 kobject_put(kobj);
2007 return ret;
2008 }
2009
2010 ret = sysfs_create_group(kobj, &vmbus_chan_group);
2011
2012 if (ret) {
2013 /*
2014 * The calling functions' error handling paths will clean up the
2015 * empty channel directory.
2016 */
2017 kobject_put(kobj);
2018 dev_err(device, "Unable to set up channel sysfs files\n");
2019 return ret;
2020 }
2021
2022 kobject_uevent(kobj, KOBJ_ADD);
2023
2024 return 0;
2025 }
2026
2027 /*
2028 * vmbus_remove_channel_attr_group - remove the channel's attribute group
2029 */
2030 void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
2031 {
2032 sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
2033 }
2034
2035 /*
2036 * vmbus_device_create - Creates and registers a new child device
2037 * on the vmbus.
2038 */
2039 struct hv_device *vmbus_device_create(const guid_t *type,
2040 const guid_t *instance,
2041 struct vmbus_channel *channel)
2042 {
2043 struct hv_device *child_device_obj;
2044
2045 child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
2046 if (!child_device_obj) {
2047 pr_err("Unable to allocate device object for child device\n");
2048 return NULL;
2049 }
2050
2051 child_device_obj->channel = channel;
2052 guid_copy(&child_device_obj->dev_type, type);
2053 guid_copy(&child_device_obj->dev_instance, instance);
2054 child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;
2055
2056 return child_device_obj;
2057 }
2058
2059 /*
2060 * vmbus_device_register - Register the child device
2061 */
2062 int vmbus_device_register(struct hv_device *child_device_obj)
2063 {
2064 struct kobject *kobj = &child_device_obj->device.kobj;
2065 int ret;
2066
2067 dev_set_name(&child_device_obj->device, "%pUl",
2068 &child_device_obj->channel->offermsg.offer.if_instance);
2069
2070 child_device_obj->device.bus = &hv_bus;
2071 child_device_obj->device.parent = vmbus_root_device;
2072 child_device_obj->device.release = vmbus_device_release;
2073
2074 child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
2075 child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
2076 dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
2077
2078 /*
2079 * Register with the LDM. This will kick off the driver/device
2080 * binding...which will eventually call vmbus_match() and vmbus_probe()
2081 */
2082 ret = device_register(&child_device_obj->device);
2083 if (ret) {
2084 pr_err("Unable to register child device\n");
2085 put_device(&child_device_obj->device);
2086 return ret;
2087 }
2088
2089 /*
2090 * If device_register() found a driver to assign to the device, the
2091 * driver's probe function has already run at this point. If that
2092 * probe function accesses or operates on the "channels" subdirectory
2093 * in sysfs, those operations will have failed because the "channels"
2094 * subdirectory doesn't exist until the code below runs. Or if the
2095 * probe function creates a /dev entry, a user space program could
2096 * find and open the /dev entry, and then create a race by accessing
2097 * the "channels" subdirectory while the creation steps are in progress
2098 * here. The race can't result in a kernel failure, but the user space
2099 * program may get an error in accessing "channels" or its
2100 * subdirectories. See also comments with vmbus_add_dynid() about a
2101 * related race condition.
2102 */
2103 child_device_obj->channels_kset = kset_create_and_add("channels",
2104 NULL, kobj);
2105 if (!child_device_obj->channels_kset) {
2106 ret = -ENOMEM;
2107 goto err_dev_unregister;
2108 }
2109
2110 ret = vmbus_add_channel_kobj(child_device_obj,
2111 child_device_obj->channel);
2112 if (ret) {
2113 pr_err("Unable to register primary channel\n");
2114 goto err_kset_unregister;
2115 }
2116 hv_debug_add_dev_dir(child_device_obj);
2117
2118 return 0;
2119
2120 err_kset_unregister:
2121 kset_unregister(child_device_obj->channels_kset);
2122
2123 err_dev_unregister:
2124 device_unregister(&child_device_obj->device);
2125 return ret;
2126 }
2127
2128 /*
2129 * vmbus_device_unregister - Remove the specified child device
2130 * from the vmbus.
2131 */
2132 void vmbus_device_unregister(struct hv_device *device_obj)
2133 {
2134 pr_debug("child device %s unregistered\n",
2135 dev_name(&device_obj->device));
2136
2137 kset_unregister(device_obj->channels_kset);
2138
2139 /*
2140 * Kick off the process of unregistering the device.
2141 * This will call vmbus_remove() and eventually vmbus_device_release()
2142 */
2143 device_unregister(&device_obj->device);
2144 }
2145 EXPORT_SYMBOL_GPL(vmbus_device_unregister);
2146
2147 #ifdef CONFIG_ACPI
2148 /*
2149 * VMBus is an ACPI-enumerated device. Get the information we
2150 * need from the DSDT.
2151 */
2152 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
2153 {
2154 resource_size_t start = 0;
2155 resource_size_t end = 0;
2156 struct resource *new_res;
2157 struct resource **old_res = &hyperv_mmio;
2158 struct resource **prev_res = NULL;
2159 struct resource r;
2160
2161 switch (res->type) {
2162
2163 /*
2164 * "Address" descriptors are for bus windows. Ignore
2165 * "memory" descriptors, which are for registers on
2166 * devices.
2167 */
2168 case ACPI_RESOURCE_TYPE_ADDRESS32:
2169 start = res->data.address32.address.minimum;
2170 end = res->data.address32.address.maximum;
2171 break;
2172
2173 case ACPI_RESOURCE_TYPE_ADDRESS64:
2174 start = res->data.address64.address.minimum;
2175 end = res->data.address64.address.maximum;
2176 break;
2177
2178 /*
2179 * The IRQ information is needed only on ARM64, which Hyper-V
2180 * sets up in the extended format. IRQ information is present
2181 * on x86/x64 in the non-extended format but it is not used by
2182 * Linux. So don't bother checking for the non-extended format.
2183 */
2184 case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
2185 if (!acpi_dev_resource_interrupt(res, 0, &r)) {
2186 pr_err("Unable to parse Hyper-V ACPI interrupt\n");
2187 return AE_ERROR;
2188 }
2189 /* ARM64 INTID for VMbus */
2190 vmbus_interrupt = res->data.extended_irq.interrupts[0];
2191 /* Linux IRQ number */
2192 vmbus_irq = r.start;
2193 return AE_OK;
2194
2195 default:
2196 /* Unused resource type */
2197 return AE_OK;
2198
2199 }
2200 /*
2201 * Ignore ranges that are below 1MB, as they're not
2202 * necessary or useful here.
2203 */
2204 if (end < 0x100000)
2205 return AE_OK;
2206
2207 new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
2208 if (!new_res)
2209 return AE_NO_MEMORY;
2210
2211 /* If this range overlaps the virtual TPM, truncate it. */
2212 if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
2213 end = VTPM_BASE_ADDRESS;
2214
2215 new_res->name = "hyperv mmio";
2216 new_res->flags = IORESOURCE_MEM;
2217 new_res->start = start;
2218 new_res->end = end;
2219
2220 /*
2221 * If two ranges are adjacent, merge them.
2222 */
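	/*
	 * For example (illustrative addresses): an existing range ending at
	 * 0xf7ffffff followed by a new range starting at 0xf8000000 is
	 * collapsed into a single resource, keeping the hyperv_mmio list
	 * sorted and minimal.
	 */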
2223 do {
2224 if (!*old_res) {
2225 *old_res = new_res;
2226 break;
2227 }
2228
2229 if (((*old_res)->end + 1) == new_res->start) {
2230 (*old_res)->end = new_res->end;
2231 kfree(new_res);
2232 break;
2233 }
2234
2235 if ((*old_res)->start == new_res->end + 1) {
2236 (*old_res)->start = new_res->start;
2237 kfree(new_res);
2238 break;
2239 }
2240
2241 if ((*old_res)->start > new_res->end) {
2242 new_res->sibling = *old_res;
2243 if (prev_res)
2244 (*prev_res)->sibling = new_res;
2245 *old_res = new_res;
2246 break;
2247 }
2248
2249 prev_res = old_res;
2250 old_res = &(*old_res)->sibling;
2251
2252 } while (1);
2253
2254 return AE_OK;
2255 }
2256 #endif
2257
2258 static void vmbus_mmio_remove(void)
2259 {
2260 struct resource *cur_res;
2261 struct resource *next_res;
2262
2263 if (hyperv_mmio) {
2264 if (fb_mmio) {
2265 __release_region(hyperv_mmio, fb_mmio->start,
2266 resource_size(fb_mmio));
2267 fb_mmio = NULL;
2268 }
2269
2270 for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
2271 next_res = cur_res->sibling;
2272 kfree(cur_res);
2273 }
2274 }
2275 }
2276
2277 static void __maybe_unused vmbus_reserve_fb(void)
2278 {
2279 resource_size_t start = 0, size;
2280 struct pci_dev *pdev;
2281
2282 if (efi_enabled(EFI_BOOT)) {
2283 /* Gen2 VM: get FB base from EFI framebuffer */
2284 if (IS_ENABLED(CONFIG_SYSFB)) {
2285 start = screen_info.lfb_base;
2286 size = max_t(__u32, screen_info.lfb_size, 0x800000);
2287 }
2288 } else {
2289 /* Gen1 VM: get FB base from PCI */
2290 pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
2291 PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
2292 if (!pdev)
2293 return;
2294
2295 if (pdev->resource[0].flags & IORESOURCE_MEM) {
2296 start = pci_resource_start(pdev, 0);
2297 size = pci_resource_len(pdev, 0);
2298 }
2299
2300 /*
2301 * Release the PCI device so hyperv_drm or hyperv_fb driver can
2302 * grab it later.
2303 */
2304 pci_dev_put(pdev);
2305 }
2306
2307 if (!start)
2308 return;
2309
2310 /*
2311 * Make a claim for the frame buffer in the resource tree under the
2312 * first node, which will be the one below 4GB. The length seems to
2313 * be underreported, particularly in a Generation 1 VM. So start out
2314 * reserving a larger area and make it smaller until it succeeds.
2315 */
2316 for (; !fb_mmio && (size >= 0x100000); size >>= 1)
2317 fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
2318 }
2319
2320 /**
2321 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
2322 * @new: If successful, supplied a pointer to the
2323 * allocated MMIO space.
2324 * @device_obj: Identifies the caller
2325 * @min: Minimum guest physical address of the
2326 * allocation
2327 * @max: Maximum guest physical address
2328 * @size: Size of the range to be allocated
2329 * @align: Alignment of the range to be allocated
2330 * @fb_overlap_ok: Whether this allocation can be allowed
2331 * to overlap the video frame buffer.
2332 *
2333 * This function walks the resources granted to VMBus by the
2334 * _CRS object in the ACPI namespace underneath the parent
2335 * "bridge" whether that's a root PCI bus in the Generation 1
2336 * case or a Module Device in the Generation 2 case. It then
2337 * attempts to allocate from the global MMIO pool in a way that
2338 * matches the constraints supplied in these parameters and by
2339 * that _CRS.
2340 *
2341 * Return: 0 on success, -errno on failure
2342 */
2343 int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
2344 resource_size_t min, resource_size_t max,
2345 resource_size_t size, resource_size_t align,
2346 bool fb_overlap_ok)
2347 {
2348 struct resource *iter, *shadow;
2349 resource_size_t range_min, range_max, start, end;
2350 const char *dev_n = dev_name(&device_obj->device);
2351 int retval;
2352
2353 retval = -ENXIO;
2354 mutex_lock(&hyperv_mmio_lock);
2355
2356 /*
2357 * If overlaps with frame buffers are allowed, then first attempt to
2358 * make the allocation from within the reserved region. Because it
2359 * is already reserved, no shadow allocation is necessary.
2360 */
2361 if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
2362 !(max < fb_mmio->start)) {
2363
2364 range_min = fb_mmio->start;
2365 range_max = fb_mmio->end;
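		/*
		 * Round range_min up to the next 'align' boundary; align is
		 * assumed to be a power of 2.
		 */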
2366 start = (range_min + align - 1) & ~(align - 1);
2367 for (; start + size - 1 <= range_max; start += align) {
2368 *new = request_mem_region_exclusive(start, size, dev_n);
2369 if (*new) {
2370 retval = 0;
2371 goto exit;
2372 }
2373 }
2374 }
2375
2376 for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2377 if ((iter->start >= max) || (iter->end <= min))
2378 continue;
2379
2380 range_min = iter->start;
2381 range_max = iter->end;
2382 start = (range_min + align - 1) & ~(align - 1);
2383 for (; start + size - 1 <= range_max; start += align) {
2384 end = start + size - 1;
2385
2386 /* Skip the whole fb_mmio region if not fb_overlap_ok */
2387 if (!fb_overlap_ok && fb_mmio &&
2388 (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
2389 ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
2390 continue;
2391
2392 shadow = __request_region(iter, start, size, NULL,
2393 IORESOURCE_BUSY);
2394 if (!shadow)
2395 continue;
2396
2397 *new = request_mem_region_exclusive(start, size, dev_n);
2398 if (*new) {
2399 shadow->name = (char *)*new;
2400 retval = 0;
2401 goto exit;
2402 }
2403
2404 __release_region(iter, start, size);
2405 }
2406 }
2407
2408 exit:
2409 mutex_unlock(&hyperv_mmio_lock);
2410 return retval;
2411 }
2412 EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
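/*
 * Sketch of a typical caller (illustrative, compiled out): the Hyper-V
 * framebuffer/DRM drivers claim their VRAM aperture roughly like this.
 * example_claim_mmio() and the size/alignment values are placeholders.
 */
#if 0
static int example_claim_mmio(struct hv_device *hdev, struct resource **res)
{
	int ret;

	ret = vmbus_allocate_mmio(res, hdev, 0, -1,
				  0x800000 /* size */, 0x100000 /* align */,
				  true /* may overlap the firmware FB */);
	if (ret)
		return ret;

	/* ... ioremap and use [(*res)->start, (*res)->end] ... */

	vmbus_free_mmio((*res)->start, resource_size(*res));
	return 0;
}
#endif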
2413
2414 /**
2415 * vmbus_free_mmio() - Free a memory-mapped I/O range.
2416 * @start: Base address of region to release.
2417 * @size: Size of the range to be released
2418 *
2419 * This function releases anything requested by
2420 * vmbus_allocate_mmio().
2421 */
2422 void vmbus_free_mmio(resource_size_t start, resource_size_t size)
2423 {
2424 struct resource *iter;
2425
2426 mutex_lock(&hyperv_mmio_lock);
2427
2428 /*
2429 * If all bytes of the MMIO range to be released are within the
2430 * special case fb_mmio shadow region, skip releasing the shadow
2431 * region since no corresponding __request_region() was done
2432 * in vmbus_allocate_mmio().
2433 */
2434 if (fb_mmio && start >= fb_mmio->start &&
2435 (start + size - 1 <= fb_mmio->end))
2436 goto skip_shadow_release;
2437
2438 for (iter = hyperv_mmio; iter; iter = iter->sibling) {
2439 if ((iter->start >= start + size) || (iter->end <= start))
2440 continue;
2441
2442 __release_region(iter, start, size);
2443 }
2444
2445 skip_shadow_release:
2446 release_mem_region(start, size);
2447 mutex_unlock(&hyperv_mmio_lock);
2449 }
2450 EXPORT_SYMBOL_GPL(vmbus_free_mmio);
2451
2452 #ifdef CONFIG_ACPI
2453 static int vmbus_acpi_add(struct platform_device *pdev)
2454 {
2455 acpi_status result;
2456 int ret_val = -ENODEV;
2457 struct acpi_device *ancestor;
2458 struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
2459
2460 vmbus_root_device = &device->dev;
2461
2462 /*
2463 * Older versions of Hyper-V for ARM64 fail to include the _CCA
2464 * method on the top level VMbus device in the DSDT. But devices
2465 * are hardware coherent in all current Hyper-V use cases, so fix
2466 * up the ACPI device to behave as if _CCA is present and indicates
2467 * hardware coherence.
2468 */
2469 ACPI_COMPANION_SET(&device->dev, device);
2470 if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
2471 device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
2472 pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
2473 device->flags.cca_seen = true;
2474 device->flags.coherent_dma = true;
2475 }
2476
2477 result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
2478 vmbus_walk_resources, NULL);
2479
2480 if (ACPI_FAILURE(result))
2481 goto acpi_walk_err;
2482 /*
2483 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
2484 * firmware) is the VMOD that has the mmio ranges. Get that.
2485 */
2486 for (ancestor = acpi_dev_parent(device);
2487 ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
2488 ancestor = acpi_dev_parent(ancestor)) {
2489 result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
2490 vmbus_walk_resources, NULL);
2491
2492 if (ACPI_FAILURE(result))
2493 continue;
2494 if (hyperv_mmio) {
2495 vmbus_reserve_fb();
2496 break;
2497 }
2498 }
2499 ret_val = 0;
2500
2501 acpi_walk_err:
2502 if (ret_val)
2503 vmbus_mmio_remove();
2504 return ret_val;
2505 }
2506 #else
2507 static int vmbus_acpi_add(struct platform_device *pdev)
2508 {
2509 return 0;
2510 }
2511 #endif
2512
2513 static int vmbus_set_irq(struct platform_device *pdev)
2514 {
2515 struct irq_data *data;
2516 int irq;
2517 irq_hw_number_t hwirq;
2518
2519 irq = platform_get_irq(pdev, 0);
2520 /* platform_get_irq() never returns 0; a negative value means failure. */
2521 if (irq < 0)
2522 return irq;
2523
2524 data = irq_get_irq_data(irq);
2525 if (!data) {
2526 pr_err("No interrupt data for VMBus virq %d\n", irq);
2527 return -ENODEV;
2528 }
2529 hwirq = irqd_to_hwirq(data);
2530
2531 vmbus_irq = irq;
2532 vmbus_interrupt = hwirq;
2533 pr_debug("VMBus virq %d, hwirq %d\n", vmbus_irq, vmbus_interrupt);
2534
2535 return 0;
2536 }
2537
2538 static int vmbus_device_add(struct platform_device *pdev)
2539 {
2540 struct resource **cur_res = &hyperv_mmio;
2541 struct of_range range;
2542 struct of_range_parser parser;
2543 struct device_node *np = pdev->dev.of_node;
2544 int ret;
2545
2546 vmbus_root_device = &pdev->dev;
2547
2548 ret = of_range_parser_init(&parser, np);
2549 if (ret)
2550 return ret;
2551
2552 if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR))
2553 ret = vmbus_set_irq(pdev);
2554 if (ret)
2555 return ret;
2556
2557 for_each_of_range(&parser, &range) {
2558 struct resource *res;
2559
2560 res = kzalloc(sizeof(*res), GFP_KERNEL);
2561 if (!res) {
2562 vmbus_mmio_remove();
2563 return -ENOMEM;
2564 }
2565
2566 res->name = "hyperv mmio";
2567 res->flags = range.flags;
2568 res->start = range.cpu_addr;
2569 res->end = range.cpu_addr + range.size;
2570
2571 *cur_res = res;
2572 cur_res = &res->sibling;
2573 }
2574
2575 return ret;
2576 }
2577
2578 static int vmbus_platform_driver_probe(struct platform_device *pdev)
2579 {
2580 if (acpi_disabled)
2581 return vmbus_device_add(pdev);
2582 else
2583 return vmbus_acpi_add(pdev);
2584 }
2585
2586 static void vmbus_platform_driver_remove(struct platform_device *pdev)
2587 {
2588 vmbus_mmio_remove();
2589 }
2590
2591 #ifdef CONFIG_PM_SLEEP
2592 static int vmbus_bus_suspend(struct device *dev)
2593 {
2594 struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
2595 hv_context.cpu_context, VMBUS_CONNECT_CPU);
2596 struct vmbus_channel *channel, *sc;
2597
2598 tasklet_disable(&hv_cpu->msg_dpc);
2599 vmbus_connection.ignore_any_offer_msg = true;
2600 /* The tasklet_enable() takes care of providing a memory barrier */
2601 tasklet_enable(&hv_cpu->msg_dpc);
2602
2603 /* Drain all the workqueues as we are in suspend */
2604 drain_workqueue(vmbus_connection.rescind_work_queue);
2605 drain_workqueue(vmbus_connection.work_queue);
2606 drain_workqueue(vmbus_connection.handle_primary_chan_wq);
2607 drain_workqueue(vmbus_connection.handle_sub_chan_wq);
2608
2609 mutex_lock(&vmbus_connection.channel_mutex);
2610 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2611 if (!is_hvsock_channel(channel))
2612 continue;
2613
2614 vmbus_force_channel_rescinded(channel);
2615 }
2616 mutex_unlock(&vmbus_connection.channel_mutex);
2617
2618 /*
2619 * Wait until all the sub-channels and hv_sock channels have been
2620 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
2621 * they would conflict with the new sub-channels that will be created
2622 * in the resume path. hv_sock channels should also be destroyed, but
2623 * a hv_sock channel of an established hv_sock connection can not be
2624 * really destroyed since it may still be referenced by the userspace
2625 * application, so we just force the hv_sock channel to be rescinded
2626 * by vmbus_force_channel_rescinded(), and the userspace application
2627 * will thoroughly destroy the channel after hibernation.
2628 *
2629 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
2630 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
2631 */
2632 if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
2633 wait_for_completion(&vmbus_connection.ready_for_suspend_event);
2634
2635 mutex_lock(&vmbus_connection.channel_mutex);
2636
2637 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2638 /*
2639 * Remove the channel from the array of channels and invalidate
2640 * the channel's relid. Upon resume, vmbus_onoffer() will fix
2641 * up the relid (and other fields, if necessary) and add the
2642 * channel back to the array.
2643 */
2644 vmbus_channel_unmap_relid(channel);
2645 channel->offermsg.child_relid = INVALID_RELID;
2646
2647 if (is_hvsock_channel(channel)) {
2648 if (!channel->rescind) {
2649 pr_err("hv_sock channel not rescinded!\n");
2650 WARN_ON_ONCE(1);
2651 }
2652 continue;
2653 }
2654
2655 list_for_each_entry(sc, &channel->sc_list, sc_list) {
2656 pr_err("Sub-channel not deleted!\n");
2657 WARN_ON_ONCE(1);
2658 }
2659 }
2660
2661 mutex_unlock(&vmbus_connection.channel_mutex);
2662
2663 vmbus_initiate_unload(false);
2664
2665 return 0;
2666 }
2667
2668 static int vmbus_bus_resume(struct device *dev)
2669 {
2670 struct vmbus_channel *channel;
2671 struct vmbus_channel_msginfo *msginfo;
2672 size_t msgsize;
2673 int ret;
2674
2675 vmbus_connection.ignore_any_offer_msg = false;
2676
2677 /*
2678 * We only use the 'vmbus_proto_version', which was in use before
2679 * hibernation, to re-negotiate with the host.
2680 */
2681 if (!vmbus_proto_version) {
2682 pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
2683 return -EINVAL;
2684 }
2685
2686 msgsize = sizeof(*msginfo) +
2687 sizeof(struct vmbus_channel_initiate_contact);
2688
2689 msginfo = kzalloc(msgsize, GFP_KERNEL);
2690
2691 if (msginfo == NULL)
2692 return -ENOMEM;
2693
2694 ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
2695
2696 kfree(msginfo);
2697
2698 if (ret != 0)
2699 return ret;
2700
2701 vmbus_request_offers();
2702
2703 mutex_lock(&vmbus_connection.channel_mutex);
2704 list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
2705 if (channel->offermsg.child_relid != INVALID_RELID)
2706 continue;
2707
2708 /* hvsock channels are not expected to be present. */
2709 if (is_hvsock_channel(channel))
2710 continue;
2711
2712 pr_err("channel %pUl/%pUl not present after resume.\n",
2713 &channel->offermsg.offer.if_type,
2714 &channel->offermsg.offer.if_instance);
2715 /* ToDo: Cleanup these channels here */
2716 }
2717 mutex_unlock(&vmbus_connection.channel_mutex);
2718
2719 /* Reset the event for the next suspend. */
2720 reinit_completion(&vmbus_connection.ready_for_suspend_event);
2721
2722 return 0;
2723 }
2724 #else
2725 #define vmbus_bus_suspend NULL
2726 #define vmbus_bus_resume NULL
2727 #endif /* CONFIG_PM_SLEEP */
2728
2729 static const __maybe_unused struct of_device_id vmbus_of_match[] = {
2730 {
2731 .compatible = "microsoft,vmbus",
2732 },
2733 {
2734 /* sentinel */
2735 },
2736 };
2737 MODULE_DEVICE_TABLE(of, vmbus_of_match);
2738
2739 static const __maybe_unused struct acpi_device_id vmbus_acpi_device_ids[] = {
2740 {"VMBUS", 0},
2741 {"VMBus", 0},
2742 {"", 0},
2743 };
2744 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
2745
2746 /*
2747 * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
2748 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
2749 * the resume path, the pci "noirq" restore op runs before "non-noirq" op (see
2750 * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
2751 * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
2752 * resume callback must also run via the "noirq" ops.
2753 *
2754 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
2755 * earlier in this file before vmbus_pm.
2756 */
2757
2758 static const struct dev_pm_ops vmbus_bus_pm = {
2759 .suspend_noirq = NULL,
2760 .resume_noirq = NULL,
2761 .freeze_noirq = vmbus_bus_suspend,
2762 .thaw_noirq = vmbus_bus_resume,
2763 .poweroff_noirq = vmbus_bus_suspend,
2764 .restore_noirq = vmbus_bus_resume
2765 };
2766
2767 static struct platform_driver vmbus_platform_driver = {
2768 .probe = vmbus_platform_driver_probe,
2769 .remove = vmbus_platform_driver_remove,
2770 .driver = {
2771 .name = "vmbus",
2772 .acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
2773 .of_match_table = of_match_ptr(vmbus_of_match),
2774 .pm = &vmbus_bus_pm,
2775 .probe_type = PROBE_FORCE_SYNCHRONOUS,
2776 }
2777 };
2778
2779 static void hv_kexec_handler(void)
2780 {
2781 hv_stimer_global_cleanup();
2782 vmbus_initiate_unload(false);
2783 /* Make sure conn_state is set as hv_synic_cleanup checks for it */
2784 mb();
2785 cpuhp_remove_state(hyperv_cpuhp_online);
2786 }
2787
2788 static void hv_crash_handler(struct pt_regs *regs)
2789 {
2790 int cpu;
2791
2792 vmbus_initiate_unload(true);
2793 /*
2794 * In the crash handler we can't schedule synic cleanup for all CPUs,
2795 * so we do the cleanup for the current CPU only. This should be sufficient
2796 * for kdump.
2797 */
2798 cpu = smp_processor_id();
2799 hv_stimer_cleanup(cpu);
2800 hv_synic_disable_regs(cpu);
2801 }
2802
2803 static int hv_synic_suspend(void)
2804 {
2805 /*
2806 * When we reach here, all the non-boot CPUs have been offlined.
2807 * If we're in a legacy configuration where stimer Direct Mode is
2808 * not enabled, the stimers on the non-boot CPUs have been unbound
2809 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
2810 * hv_stimer_cleanup() -> clockevents_unbind_device().
2811 *
2812 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
2813 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
2814 * 1) it's unnecessary as interrupts remain disabled between
2815 * syscore_suspend() and syscore_resume(): see create_image() and
2816 * resume_target_kernel()
2817 * 2) the stimer on CPU0 is automatically disabled later by
2818 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
2819 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown()
2820 * 3) a warning would be triggered if we call
2821 * clockevents_unbind_device(), which may sleep, in an
2822 * interrupts-disabled context.
2823 */
2824
2825 hv_synic_disable_regs(0);
2826
2827 return 0;
2828 }
2829
2830 static void hv_synic_resume(void)
2831 {
2832 hv_synic_enable_regs(0);
2833
2834 /*
2835 * Note: we don't need to call hv_stimer_init(0), because the timer
2836 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
2837 * automatically re-enabled in timekeeping_resume().
2838 */
2839 }
2840
2841 /* The callbacks run only on CPU0, with irqs_disabled. */
2842 static struct syscore_ops hv_synic_syscore_ops = {
2843 .suspend = hv_synic_suspend,
2844 .resume = hv_synic_resume,
2845 };
2846
2847 static int __init hv_acpi_init(void)
2848 {
2849 int ret;
2850
2851 if (!hv_is_hyperv_initialized())
2852 return -ENODEV;
2853
2854 if (hv_root_partition() && !hv_nested)
2855 return 0;
2856
2857 /*
2858 * Get ACPI resources first.
2859 */
2860 ret = platform_driver_register(&vmbus_platform_driver);
2861 if (ret)
2862 return ret;
2863
2864 if (!vmbus_root_device) {
2865 ret = -ENODEV;
2866 goto cleanup;
2867 }
2868
2869 /*
2870 * If we're on an architecture with a hardcoded hypervisor
2871 * vector (i.e. x86/x64), override the VMbus interrupt found
2872 * in the ACPI tables. Ensure vmbus_irq is not set since the
2873 * normal Linux IRQ mechanism is not used in this case.
2874 */
2875 #ifdef HYPERVISOR_CALLBACK_VECTOR
2876 vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
2877 vmbus_irq = -1;
2878 #endif
2879
2880 hv_debug_init();
2881
2882 ret = vmbus_bus_init();
2883 if (ret)
2884 goto cleanup;
2885
2886 hv_setup_kexec_handler(hv_kexec_handler);
2887 hv_setup_crash_handler(hv_crash_handler);
2888
2889 register_syscore_ops(&hv_synic_syscore_ops);
2890
2891 return 0;
2892
2893 cleanup:
2894 platform_driver_unregister(&vmbus_platform_driver);
2895 vmbus_root_device = NULL;
2896 return ret;
2897 }
2898
2899 static void __exit vmbus_exit(void)
2900 {
2901 int cpu;
2902
2903 unregister_syscore_ops(&hv_synic_syscore_ops);
2904
2905 hv_remove_kexec_handler();
2906 hv_remove_crash_handler();
2907 vmbus_connection.conn_state = DISCONNECTED;
2908 hv_stimer_global_cleanup();
2909 vmbus_disconnect();
2910 if (vmbus_irq == -1) {
2911 hv_remove_vmbus_handler();
2912 } else {
2913 free_percpu_irq(vmbus_irq, vmbus_evt);
2914 free_percpu(vmbus_evt);
2915 }
2916 for_each_online_cpu(cpu) {
2917 struct hv_per_cpu_context *hv_cpu
2918 = per_cpu_ptr(hv_context.cpu_context, cpu);
2919
2920 tasklet_kill(&hv_cpu->msg_dpc);
2921 }
2922 hv_debug_rm_all_dir();
2923
2924 vmbus_free_channels();
2925 kfree(vmbus_connection.channels);
2926
2927 /*
2928 * The vmbus panic notifier is always registered, hence we should
2929 * also unconditionally unregister it here as well.
2930 */
2931 atomic_notifier_chain_unregister(&panic_notifier_list,
2932 &hyperv_panic_vmbus_unload_block);
2933
2934 bus_unregister(&hv_bus);
2935
2936 cpuhp_remove_state(hyperv_cpuhp_online);
2937 hv_synic_free();
2938 platform_driver_unregister(&vmbus_platform_driver);
2939 }
2940 
2942 MODULE_LICENSE("GPL");
2943 MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");
2944
2945 subsys_initcall(hv_acpi_init);
2946 module_exit(vmbus_exit);
2947