xref: /linux/drivers/xen/xenbus/xenbus_probe.c (revision 0f912c8917e810a4aa81d122a8e7d0a918505ab9)
1 /******************************************************************************
2  * Talks to Xen Store to figure out what devices we have.
3  *
4  * Copyright (C) 2005 Rusty Russell, IBM Corporation
5  * Copyright (C) 2005 Mike Wray, Hewlett-Packard
6  * Copyright (C) 2005, 2006 XenSource Ltd
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version 2
10  * as published by the Free Software Foundation; or, when distributed
11  * separately from the Linux kernel or incorporated into other
12  * software packages, subject to the following license:
13  *
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this source file (the "Software"), to deal in the Software without
16  * restriction, including without limitation the rights to use, copy, modify,
17  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18  * and to permit persons to whom the Software is furnished to do so, subject to
19  * the following conditions:
20  *
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30  * IN THE SOFTWARE.
31  */
32 
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #define dev_fmt pr_fmt
35 
36 #define DPRINTK(fmt, args...)				\
37 	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
38 		 __func__, __LINE__, ##args)
39 
40 #include <linux/kernel.h>
41 #include <linux/err.h>
42 #include <linux/string.h>
43 #include <linux/ctype.h>
44 #include <linux/fcntl.h>
45 #include <linux/mm.h>
46 #include <linux/proc_fs.h>
47 #include <linux/notifier.h>
48 #include <linux/kthread.h>
49 #include <linux/mutex.h>
50 #include <linux/io.h>
51 #include <linux/slab.h>
52 #include <linux/module.h>
53 
54 #include <asm/page.h>
55 #include <asm/xen/hypervisor.h>
56 
57 #include <xen/xen.h>
58 #include <xen/xenbus.h>
59 #include <xen/events.h>
60 #include <xen/xen-ops.h>
61 #include <xen/page.h>
62 
63 #include <xen/hvm.h>
64 
65 #include "xenbus.h"
66 
67 
/* IRQ used only for deferred (late) xenstore init; -1 when not in use. */
static int xs_init_irq = -1;
/* Event channel used to signal xenstored. */
int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);

/* Mapping of the shared xenstore ring page; NULL until mapped. */
struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);

/* True when the ring page is mapped and xenstored reports connected. */
#define XS_INTERFACE_READY \
	((xen_store_interface != NULL) && \
	 (xen_store_interface->connection == XENSTORE_CONNECTED))

/* How this domain reaches xenstore (PV, HVM, local daemon, unknown). */
enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);

/* Guest frame number of the xenstore ring page. */
static unsigned long xen_store_gfn;

/* Notifier chain invoked once xenstore becomes usable. */
static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
85 
86 /* If something in array of ids matches this device, return it. */
87 static const struct xenbus_device_id *
match_device(const struct xenbus_device_id * arr,struct xenbus_device * dev)88 match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
89 {
90 	for (; *arr->devicetype != '\0'; arr++) {
91 		if (!strcmp(arr->devicetype, dev->devicetype))
92 			return arr;
93 	}
94 	return NULL;
95 }
96 
xenbus_match(struct device * _dev,const struct device_driver * _drv)97 int xenbus_match(struct device *_dev, const struct device_driver *_drv)
98 {
99 	const struct xenbus_driver *drv = to_xenbus_driver(_drv);
100 
101 	if (!drv->ids)
102 		return 0;
103 
104 	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
105 }
106 EXPORT_SYMBOL_GPL(xenbus_match);
107 
108 
free_otherend_details(struct xenbus_device * dev)109 static void free_otherend_details(struct xenbus_device *dev)
110 {
111 	kfree(dev->otherend);
112 	dev->otherend = NULL;
113 }
114 
115 
free_otherend_watch(struct xenbus_device * dev)116 static void free_otherend_watch(struct xenbus_device *dev)
117 {
118 	if (dev->otherend_watch.node) {
119 		unregister_xenbus_watch(&dev->otherend_watch);
120 		kfree(dev->otherend_watch.node);
121 		dev->otherend_watch.node = NULL;
122 	}
123 }
124 
125 
talk_to_otherend(struct xenbus_device * dev)126 static int talk_to_otherend(struct xenbus_device *dev)
127 {
128 	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
129 
130 	free_otherend_watch(dev);
131 	free_otherend_details(dev);
132 
133 	return drv->read_otherend_details(dev);
134 }
135 
136 
137 
watch_otherend(struct xenbus_device * dev)138 static int watch_otherend(struct xenbus_device *dev)
139 {
140 	struct xen_bus_type *bus =
141 		container_of(dev->dev.bus, struct xen_bus_type, bus);
142 
143 	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
144 				    bus->otherend_will_handle,
145 				    bus->otherend_changed,
146 				    "%s/%s", dev->otherend, "state");
147 }
148 
149 
xenbus_read_otherend_details(struct xenbus_device * xendev,char * id_node,char * path_node)150 int xenbus_read_otherend_details(struct xenbus_device *xendev,
151 				 char *id_node, char *path_node)
152 {
153 	int err = xenbus_gather(XBT_NIL, xendev->nodename,
154 				id_node, "%i", &xendev->otherend_id,
155 				path_node, NULL, &xendev->otherend,
156 				NULL);
157 	if (err) {
158 		xenbus_dev_fatal(xendev, err,
159 				 "reading other end details from %s",
160 				 xendev->nodename);
161 		return err;
162 	}
163 	if (strlen(xendev->otherend) == 0 ||
164 	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
165 		xenbus_dev_fatal(xendev, -ENOENT,
166 				 "unable to read other end from %s.  "
167 				 "missing or inaccessible.",
168 				 xendev->nodename);
169 		free_otherend_details(xendev);
170 		return -ENOENT;
171 	}
172 
173 	return 0;
174 }
175 EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
176 
xenbus_otherend_changed(struct xenbus_watch * watch,const char * path,const char * token,int ignore_on_shutdown)177 void xenbus_otherend_changed(struct xenbus_watch *watch,
178 			     const char *path, const char *token,
179 			     int ignore_on_shutdown)
180 {
181 	struct xenbus_device *dev =
182 		container_of(watch, struct xenbus_device, otherend_watch);
183 	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
184 	enum xenbus_state state;
185 
186 	/* Protect us against watches firing on old details when the otherend
187 	   details change, say immediately after a resume. */
188 	if (!dev->otherend ||
189 	    strncmp(dev->otherend, path, strlen(dev->otherend))) {
190 		dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
191 		return;
192 	}
193 
194 	state = xenbus_read_driver_state(dev, dev->otherend);
195 
196 	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
197 		state, xenbus_strstate(state), dev->otherend_watch.node, path);
198 
199 	/*
200 	 * Ignore xenbus transitions during shutdown. This prevents us doing
201 	 * work that can fail e.g., when the rootfs is gone.
202 	 */
203 	if (system_state > SYSTEM_RUNNING) {
204 		if (ignore_on_shutdown && (state == XenbusStateClosing))
205 			xenbus_frontend_closed(dev);
206 		return;
207 	}
208 
209 	if (drv->otherend_changed)
210 		drv->otherend_changed(dev, state);
211 }
212 EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
213 
/*
 * Generate a read-only sysfs show function for an atomic_t statistics
 * counter embedded in struct xenbus_device, plus its DEVICE_ATTR.
 */
#define XENBUS_SHOW_STAT(name)						\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	struct xenbus_device *dev = to_xenbus_device(_dev);		\
									\
	return sprintf(buf, "%d\n", atomic_read(&dev->name));		\
}									\
static DEVICE_ATTR_RO(name)

/* Per-device event-channel statistics exported via sysfs. */
XENBUS_SHOW_STAT(event_channels);
XENBUS_SHOW_STAT(events);
XENBUS_SHOW_STAT(spurious_events);
XENBUS_SHOW_STAT(jiffies_eoi_delayed);
229 
spurious_threshold_show(struct device * _dev,struct device_attribute * attr,char * buf)230 static ssize_t spurious_threshold_show(struct device *_dev,
231 				       struct device_attribute *attr,
232 				       char *buf)
233 {
234 	struct xenbus_device *dev = to_xenbus_device(_dev);
235 
236 	return sprintf(buf, "%d\n", dev->spurious_threshold);
237 }
238 
spurious_threshold_store(struct device * _dev,struct device_attribute * attr,const char * buf,size_t count)239 static ssize_t spurious_threshold_store(struct device *_dev,
240 					struct device_attribute *attr,
241 					const char *buf, size_t count)
242 {
243 	struct xenbus_device *dev = to_xenbus_device(_dev);
244 	unsigned int val;
245 	ssize_t ret;
246 
247 	ret = kstrtouint(buf, 0, &val);
248 	if (ret)
249 		return ret;
250 
251 	dev->spurious_threshold = val;
252 
253 	return count;
254 }
255 
256 static DEVICE_ATTR_RW(spurious_threshold);
257 
/* Statistics attributes, grouped under a "xenbus" subdirectory in sysfs. */
static struct attribute *xenbus_attrs[] = {
	&dev_attr_event_channels.attr,
	&dev_attr_events.attr,
	&dev_attr_spurious_events.attr,
	&dev_attr_jiffies_eoi_delayed.attr,
	&dev_attr_spurious_threshold.attr,
	NULL
};

static const struct attribute_group xenbus_group = {
	.name = "xenbus",
	.attrs = xenbus_attrs,
};
271 
xenbus_dev_probe(struct device * _dev)272 int xenbus_dev_probe(struct device *_dev)
273 {
274 	struct xenbus_device *dev = to_xenbus_device(_dev);
275 	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
276 	const struct xenbus_device_id *id;
277 	int err;
278 
279 	DPRINTK("%s", dev->nodename);
280 
281 	if (!drv->probe) {
282 		err = -ENODEV;
283 		goto fail;
284 	}
285 
286 	id = match_device(drv->ids, dev);
287 	if (!id) {
288 		err = -ENODEV;
289 		goto fail;
290 	}
291 
292 	err = talk_to_otherend(dev);
293 	if (err) {
294 		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
295 			 dev->nodename);
296 		return err;
297 	}
298 
299 	if (!try_module_get(drv->driver.owner)) {
300 		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
301 			 drv->driver.name);
302 		err = -ESRCH;
303 		goto fail;
304 	}
305 
306 	down(&dev->reclaim_sem);
307 	err = drv->probe(dev, id);
308 	up(&dev->reclaim_sem);
309 	if (err)
310 		goto fail_put;
311 
312 	err = watch_otherend(dev);
313 	if (err) {
314 		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
315 		       dev->nodename);
316 		goto fail_remove;
317 	}
318 
319 	dev->spurious_threshold = 1;
320 	if (sysfs_create_group(&dev->dev.kobj, &xenbus_group))
321 		dev_warn(&dev->dev, "sysfs_create_group on %s failed.\n",
322 			 dev->nodename);
323 
324 	return 0;
325 fail_remove:
326 	if (drv->remove) {
327 		down(&dev->reclaim_sem);
328 		drv->remove(dev);
329 		up(&dev->reclaim_sem);
330 	}
331 fail_put:
332 	module_put(drv->driver.owner);
333 fail:
334 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
335 	return err;
336 }
337 EXPORT_SYMBOL_GPL(xenbus_dev_probe);
338 
xenbus_dev_remove(struct device * _dev)339 void xenbus_dev_remove(struct device *_dev)
340 {
341 	struct xenbus_device *dev = to_xenbus_device(_dev);
342 	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
343 
344 	DPRINTK("%s", dev->nodename);
345 
346 	sysfs_remove_group(&dev->dev.kobj, &xenbus_group);
347 
348 	free_otherend_watch(dev);
349 
350 	if (drv->remove) {
351 		down(&dev->reclaim_sem);
352 		drv->remove(dev);
353 		up(&dev->reclaim_sem);
354 	}
355 
356 	module_put(drv->driver.owner);
357 
358 	free_otherend_details(dev);
359 
360 	/*
361 	 * If the toolstack has forced the device state to closing then set
362 	 * the state to closed now to allow it to be cleaned up.
363 	 * Similarly, if the driver does not support re-bind, set the
364 	 * closed.
365 	 */
366 	if (!drv->allow_rebind ||
367 	    xenbus_read_driver_state(dev, dev->nodename) == XenbusStateClosing)
368 		xenbus_switch_state(dev, XenbusStateClosed);
369 }
370 EXPORT_SYMBOL_GPL(xenbus_dev_remove);
371 
xenbus_register_driver_common(struct xenbus_driver * drv,struct xen_bus_type * bus,struct module * owner,const char * mod_name)372 int xenbus_register_driver_common(struct xenbus_driver *drv,
373 				  struct xen_bus_type *bus,
374 				  struct module *owner, const char *mod_name)
375 {
376 	drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
377 	drv->driver.bus = &bus->bus;
378 	drv->driver.owner = owner;
379 	drv->driver.mod_name = mod_name;
380 
381 	return driver_register(&drv->driver);
382 }
383 EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
384 
xenbus_unregister_driver(struct xenbus_driver * drv)385 void xenbus_unregister_driver(struct xenbus_driver *drv)
386 {
387 	driver_unregister(&drv->driver);
388 }
389 EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
390 
/* Search context for bus_for_each_dev() walks: node to look for, result. */
struct xb_find_info {
	struct xenbus_device *dev;	/* match output; referenced via get_device() */
	const char *nodename;		/* xenstore path to match against */
};
395 
cmp_dev(struct device * dev,void * data)396 static int cmp_dev(struct device *dev, void *data)
397 {
398 	struct xenbus_device *xendev = to_xenbus_device(dev);
399 	struct xb_find_info *info = data;
400 
401 	if (!strcmp(xendev->nodename, info->nodename)) {
402 		info->dev = xendev;
403 		get_device(dev);
404 		return 1;
405 	}
406 	return 0;
407 }
408 
xenbus_device_find(const char * nodename,struct bus_type * bus)409 static struct xenbus_device *xenbus_device_find(const char *nodename,
410 						struct bus_type *bus)
411 {
412 	struct xb_find_info info = { .dev = NULL, .nodename = nodename };
413 
414 	bus_for_each_dev(bus, NULL, &info, cmp_dev);
415 	return info.dev;
416 }
417 
cleanup_dev(struct device * dev,void * data)418 static int cleanup_dev(struct device *dev, void *data)
419 {
420 	struct xenbus_device *xendev = to_xenbus_device(dev);
421 	struct xb_find_info *info = data;
422 	int len = strlen(info->nodename);
423 
424 	DPRINTK("%s", info->nodename);
425 
426 	/* Match the info->nodename path, or any subdirectory of that path. */
427 	if (strncmp(xendev->nodename, info->nodename, len))
428 		return 0;
429 
430 	/* If the node name is longer, ensure it really is a subdirectory. */
431 	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
432 		return 0;
433 
434 	info->dev = xendev;
435 	get_device(dev);
436 	return 1;
437 }
438 
xenbus_cleanup_devices(const char * path,struct bus_type * bus)439 static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
440 {
441 	struct xb_find_info info = { .nodename = path };
442 
443 	do {
444 		info.dev = NULL;
445 		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
446 		if (info.dev) {
447 			dev_warn(&info.dev->dev,
448 				 "device forcefully removed from xenstore\n");
449 			info.dev->vanished = true;
450 			device_unregister(&info.dev->dev);
451 			put_device(&info.dev->dev);
452 		}
453 	} while (info.dev);
454 }
455 
xenbus_dev_release(struct device * dev)456 static void xenbus_dev_release(struct device *dev)
457 {
458 	if (dev)
459 		kfree(to_xenbus_device(dev));
460 }
461 
nodename_show(struct device * dev,struct device_attribute * attr,char * buf)462 static ssize_t nodename_show(struct device *dev,
463 			     struct device_attribute *attr, char *buf)
464 {
465 	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
466 }
467 static DEVICE_ATTR_RO(nodename);
468 
devtype_show(struct device * dev,struct device_attribute * attr,char * buf)469 static ssize_t devtype_show(struct device *dev,
470 			    struct device_attribute *attr, char *buf)
471 {
472 	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
473 }
474 static DEVICE_ATTR_RO(devtype);
475 
modalias_show(struct device * dev,struct device_attribute * attr,char * buf)476 static ssize_t modalias_show(struct device *dev,
477 			     struct device_attribute *attr, char *buf)
478 {
479 	return sprintf(buf, "%s:%s\n", dev->bus->name,
480 		       to_xenbus_device(dev)->devicetype);
481 }
482 static DEVICE_ATTR_RO(modalias);
483 
state_show(struct device * dev,struct device_attribute * attr,char * buf)484 static ssize_t state_show(struct device *dev,
485 			    struct device_attribute *attr, char *buf)
486 {
487 	return sprintf(buf, "%s\n",
488 			xenbus_strstate(to_xenbus_device(dev)->state));
489 }
490 static DEVICE_ATTR_RO(state);
491 
/* Default attributes present on every xenbus device. */
static struct attribute *xenbus_dev_attrs[] = {
	&dev_attr_nodename.attr,
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_state.attr,
	NULL,
};

static const struct attribute_group xenbus_dev_group = {
	.attrs = xenbus_dev_attrs,
};

/* Attribute groups installed by the driver core at device_register(). */
const struct attribute_group *xenbus_dev_groups[] = {
	&xenbus_dev_group,
	NULL,
};
EXPORT_SYMBOL_GPL(xenbus_dev_groups);
509 
xenbus_probe_node(struct xen_bus_type * bus,const char * type,const char * nodename)510 int xenbus_probe_node(struct xen_bus_type *bus,
511 		      const char *type,
512 		      const char *nodename)
513 {
514 	char devname[XEN_BUS_ID_SIZE];
515 	int err;
516 	struct xenbus_device *xendev;
517 	size_t stringlen;
518 	char *tmpstring;
519 
520 	enum xenbus_state state = xenbus_read_driver_state(NULL, nodename);
521 
522 	if (state != XenbusStateInitialising) {
523 		/* Device is not new, so ignore it.  This can happen if a
524 		   device is going away after switching to Closed.  */
525 		return 0;
526 	}
527 
528 	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
529 	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
530 	if (!xendev)
531 		return -ENOMEM;
532 
533 	xendev->state = XenbusStateInitialising;
534 
535 	/* Copy the strings into the extra space. */
536 
537 	tmpstring = (char *)(xendev + 1);
538 	strcpy(tmpstring, nodename);
539 	xendev->nodename = tmpstring;
540 
541 	tmpstring += strlen(tmpstring) + 1;
542 	strcpy(tmpstring, type);
543 	xendev->devicetype = tmpstring;
544 	init_completion(&xendev->down);
545 
546 	xendev->dev.bus = &bus->bus;
547 	xendev->dev.release = xenbus_dev_release;
548 
549 	err = bus->get_bus_id(devname, xendev->nodename);
550 	if (err)
551 		goto fail;
552 
553 	dev_set_name(&xendev->dev, "%s", devname);
554 	sema_init(&xendev->reclaim_sem, 1);
555 
556 	/* Register with generic device framework. */
557 	err = device_register(&xendev->dev);
558 	if (err) {
559 		put_device(&xendev->dev);
560 		xendev = NULL;
561 		goto fail;
562 	}
563 
564 	return 0;
565 fail:
566 	kfree(xendev);
567 	return err;
568 }
569 EXPORT_SYMBOL_GPL(xenbus_probe_node);
570 
xenbus_probe_device_type(struct xen_bus_type * bus,const char * type)571 static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
572 {
573 	int err = 0;
574 	char **dir;
575 	unsigned int dir_n = 0;
576 	int i;
577 
578 	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
579 	if (IS_ERR(dir))
580 		return PTR_ERR(dir);
581 
582 	for (i = 0; i < dir_n; i++) {
583 		err = bus->probe(bus, type, dir[i]);
584 		if (err)
585 			break;
586 	}
587 
588 	kfree(dir);
589 	return err;
590 }
591 
xenbus_probe_devices(struct xen_bus_type * bus)592 int xenbus_probe_devices(struct xen_bus_type *bus)
593 {
594 	int err = 0;
595 	char **dir;
596 	unsigned int i, dir_n;
597 
598 	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
599 	if (IS_ERR(dir))
600 		return PTR_ERR(dir);
601 
602 	for (i = 0; i < dir_n; i++) {
603 		err = xenbus_probe_device_type(bus, dir[i]);
604 		if (err)
605 			break;
606 	}
607 
608 	kfree(dir);
609 	return err;
610 }
611 EXPORT_SYMBOL_GPL(xenbus_probe_devices);
612 
char_count(const char * str,char c)613 static unsigned int char_count(const char *str, char c)
614 {
615 	unsigned int i, ret = 0;
616 
617 	for (i = 0; str[i]; i++)
618 		if (str[i] == c)
619 			ret++;
620 	return ret;
621 }
622 
strsep_len(const char * str,char c,unsigned int len)623 static int strsep_len(const char *str, char c, unsigned int len)
624 {
625 	unsigned int i;
626 
627 	for (i = 0; str[i]; i++)
628 		if (str[i] == c) {
629 			if (len == 0)
630 				return i;
631 			len--;
632 		}
633 	return (len == 0) ? i : -ERANGE;
634 }
635 
xenbus_dev_changed(const char * node,struct xen_bus_type * bus)636 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
637 {
638 	int exists, rootlen;
639 	struct xenbus_device *dev;
640 	char type[XEN_BUS_ID_SIZE];
641 	const char *p, *root;
642 
643 	if (char_count(node, '/') < 2)
644 		return;
645 
646 	exists = xenbus_exists(XBT_NIL, node, "");
647 	if (!exists) {
648 		xenbus_cleanup_devices(node, &bus->bus);
649 		return;
650 	}
651 
652 	/* backend/<type>/... or device/<type>/... */
653 	p = strchr(node, '/') + 1;
654 	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
655 	type[XEN_BUS_ID_SIZE-1] = '\0';
656 
657 	rootlen = strsep_len(node, '/', bus->levels);
658 	if (rootlen < 0)
659 		return;
660 	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
661 	if (!root)
662 		return;
663 
664 	dev = xenbus_device_find(root, &bus->bus);
665 	/*
666 	 * Backend domain crash results in not coordinated frontend removal,
667 	 * without going through XenbusStateClosing. If this is a new instance
668 	 * of the same device Xen tools will have reset the state to
669 	 * XenbusStateInitializing.
670 	 * It might be that the backend crashed early during the init phase of
671 	 * device setup, in which case the known state would have been
672 	 * XenbusStateInitializing. So test the backend domid to match the
673 	 * saved one. In case the new backend happens to have the same domid as
674 	 * the old one, we can just carry on, as there is no inconsistency
675 	 * resulting in this case.
676 	 */
677 	if (dev && !strcmp(bus->root, "device")) {
678 		enum xenbus_state state = xenbus_read_driver_state(dev, dev->nodename);
679 		unsigned int backend = xenbus_read_unsigned(root, "backend-id",
680 							    dev->otherend_id);
681 
682 		if (state == XenbusStateInitialising &&
683 		    (state != dev->state || backend != dev->otherend_id)) {
684 			/*
685 			 * State has been reset, assume the old one vanished
686 			 * and new one needs to be probed.
687 			 */
688 			dev_warn(&dev->dev,
689 				 "state reset occurred, reconnecting\n");
690 			dev->vanished = true;
691 		}
692 		if (dev->vanished) {
693 			device_unregister(&dev->dev);
694 			put_device(&dev->dev);
695 			dev = NULL;
696 		}
697 	}
698 	if (!dev)
699 		xenbus_probe_node(bus, type, root);
700 	else
701 		put_device(&dev->dev);
702 
703 	kfree(root);
704 }
705 EXPORT_SYMBOL_GPL(xenbus_dev_changed);
706 
xenbus_dev_freeze(struct device * dev)707 int xenbus_dev_freeze(struct device *dev)
708 {
709 	int err = 0;
710 	struct xenbus_driver *drv;
711 	struct xenbus_device *xdev
712 		= container_of(dev, struct xenbus_device, dev);
713 
714 	DPRINTK("%s", xdev->nodename);
715 
716 	if (dev->driver == NULL)
717 		return 0;
718 	drv = to_xenbus_driver(dev->driver);
719 	if (drv->suspend)
720 		err = drv->suspend(xdev);
721 	if (err)
722 		dev_warn(dev, "freeze failed: %i\n", err);
723 	return 0;
724 }
725 EXPORT_SYMBOL_GPL(xenbus_dev_freeze);
726 
xenbus_dev_restore(struct device * dev)727 int xenbus_dev_restore(struct device *dev)
728 {
729 	int err;
730 	struct xenbus_driver *drv;
731 	struct xenbus_device *xdev
732 		= container_of(dev, struct xenbus_device, dev);
733 
734 	DPRINTK("%s", xdev->nodename);
735 
736 	if (dev->driver == NULL)
737 		return 0;
738 	drv = to_xenbus_driver(dev->driver);
739 	err = talk_to_otherend(xdev);
740 	if (err) {
741 		dev_warn(dev, "restore (talk_to_otherend) failed: %i\n", err);
742 		return err;
743 	}
744 
745 	xdev->state = XenbusStateInitialising;
746 
747 	if (drv->resume) {
748 		err = drv->resume(xdev);
749 		if (err) {
750 			dev_warn(dev, "restore failed: %i\n", err);
751 			return err;
752 		}
753 	}
754 
755 	err = watch_otherend(xdev);
756 	if (err) {
757 		dev_warn(dev, "restore (watch_otherend) failed: %d\n", err);
758 		return err;
759 	}
760 
761 	return 0;
762 }
763 EXPORT_SYMBOL_GPL(xenbus_dev_restore);
764 
xenbus_dev_thaw(struct device * dev)765 int xenbus_dev_thaw(struct device *dev)
766 {
767 	/* Do nothing */
768 	DPRINTK("thaw");
769 	return 0;
770 }
771 EXPORT_SYMBOL_GPL(xenbus_dev_thaw);
772 
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready;


/*
 * Register a callback to run once xenstore is usable.  If xenstore is
 * already up the callback is invoked immediately (and its return value
 * propagated) instead of being queued on the chain.
 */
int register_xenstore_notifier(struct notifier_block *nb)
{
	int ret = 0;

	if (xenstored_ready > 0)
		ret = nb->notifier_call(nb, 0, NULL);
	else
		blocking_notifier_chain_register(&xenstore_chain, nb);

	return ret;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);
789 
unregister_xenstore_notifier(struct notifier_block * nb)790 void unregister_xenstore_notifier(struct notifier_block *nb)
791 {
792 	blocking_notifier_chain_unregister(&xenstore_chain, nb);
793 }
794 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
795 
xenbus_probe(void)796 static void xenbus_probe(void)
797 {
798 	xenstored_ready = 1;
799 
800 	if (!xen_store_interface)
801 		xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
802 					       XEN_PAGE_SIZE, MEMREMAP_WB);
803 	/*
804 	 * Now it is safe to free the IRQ used for xenstore late
805 	 * initialization. No need to unbind: it is about to be
806 	 * bound again from xb_init_comms. Note that calling
807 	 * unbind_from_irqhandler now would result in xen_evtchn_close()
808 	 * being called and the event channel not being enabled again
809 	 * afterwards, resulting in missed event notifications.
810 	 */
811 	if (xs_init_irq >= 0)
812 		free_irq(xs_init_irq, &xb_waitq);
813 
814 	/*
815 	 * In the HVM case, xenbus_init() deferred its call to
816 	 * xs_init() in case callbacks were not operational yet.
817 	 * So do it now.
818 	 */
819 	if (xen_store_domain_type == XS_HVM)
820 		xs_init();
821 
822 	/* Notify others that xenstore is up */
823 	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
824 }
825 
826 /*
827  * Returns true when XenStore init must be deferred in order to
828  * allow the PCI platform device to be initialised, before we
829  * can actually have event channel interrupts working.
830  */
xs_hvm_defer_init_for_callback(void)831 static bool xs_hvm_defer_init_for_callback(void)
832 {
833 #ifdef CONFIG_XEN_PVHVM
834 	return xen_store_domain_type == XS_HVM &&
835 		!xen_have_vector_callback;
836 #else
837 	return false;
838 #endif
839 }
840 
xenbus_probe_thread(void * unused)841 static int xenbus_probe_thread(void *unused)
842 {
843 	DEFINE_WAIT(w);
844 
845 	/*
846 	 * We actually just want to wait for *any* trigger of xb_waitq,
847 	 * and run xenbus_probe() the moment it occurs.
848 	 */
849 	prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
850 	schedule();
851 	finish_wait(&xb_waitq, &w);
852 
853 	DPRINTK("probing");
854 	xenbus_probe();
855 	return 0;
856 }
857 
xenbus_probe_initcall(void)858 static int __init xenbus_probe_initcall(void)
859 {
860 	if (!xen_domain())
861 		return -ENODEV;
862 
863 	/*
864 	 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
865 	 * need to wait for the platform PCI device to come up or
866 	 * xen_store_interface is not ready.
867 	 */
868 	if (xen_store_domain_type == XS_PV ||
869 	    (xen_store_domain_type == XS_HVM &&
870 	     !xs_hvm_defer_init_for_callback() &&
871 	     XS_INTERFACE_READY))
872 		xenbus_probe();
873 
874 	/*
875 	 * For XS_LOCAL or when xen_store_interface is not ready, spawn a
876 	 * thread which will wait for xenstored or a xenstore-stubdom to be
877 	 * started, then probe.  It will be triggered when communication
878 	 * starts happening, by waiting on xb_waitq.
879 	 */
880 	if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
881 		struct task_struct *probe_task;
882 
883 		probe_task = kthread_run(xenbus_probe_thread, NULL,
884 					 "xenbus_probe");
885 		if (IS_ERR(probe_task))
886 			return PTR_ERR(probe_task);
887 	}
888 	return 0;
889 }
890 device_initcall(xenbus_probe_initcall);
891 
xen_set_callback_via(uint64_t via)892 int xen_set_callback_via(uint64_t via)
893 {
894 	struct xen_hvm_param a;
895 	int ret;
896 
897 	a.domid = DOMID_SELF;
898 	a.index = HVM_PARAM_CALLBACK_IRQ;
899 	a.value = via;
900 
901 	ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
902 	if (ret)
903 		return ret;
904 
905 	/*
906 	 * If xenbus_probe_initcall() deferred the xenbus_probe()
907 	 * due to the callback not functioning yet, we can do it now.
908 	 */
909 	if (!xenstored_ready && xs_hvm_defer_init_for_callback())
910 		xenbus_probe();
911 
912 	return ret;
913 }
914 EXPORT_SYMBOL_GPL(xen_set_callback_via);
915 
916 /* Set up event channel for xenstored which is run as a local process
917  * (this is normally used only in dom0)
918  */
xenstored_local_init(void)919 static int __init xenstored_local_init(void)
920 {
921 	int err = -ENOMEM;
922 	unsigned long page = 0;
923 	struct evtchn_alloc_unbound alloc_unbound;
924 
925 	/* Allocate Xenstore page */
926 	page = get_zeroed_page(GFP_KERNEL);
927 	if (!page)
928 		goto out_err;
929 
930 	xen_store_gfn = virt_to_gfn((void *)page);
931 
932 	/* Next allocate a local port which xenstored can bind to */
933 	alloc_unbound.dom        = DOMID_SELF;
934 	alloc_unbound.remote_dom = DOMID_SELF;
935 
936 	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
937 					  &alloc_unbound);
938 	if (err == -ENOSYS)
939 		goto out_err;
940 
941 	BUG_ON(err);
942 	xen_store_evtchn = alloc_unbound.port;
943 
944 	return 0;
945 
946  out_err:
947 	if (page != 0)
948 		free_page(page);
949 	return err;
950 }
951 
xenbus_resume_cb(struct notifier_block * nb,unsigned long action,void * data)952 static int xenbus_resume_cb(struct notifier_block *nb,
953 			    unsigned long action, void *data)
954 {
955 	int err = 0;
956 
957 	if (xen_hvm_domain()) {
958 		uint64_t v = 0;
959 
960 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
961 		if (!err && v)
962 			xen_store_evtchn = v;
963 		else
964 			pr_warn("Cannot update xenstore event channel: %d\n",
965 				err);
966 	} else
967 		xen_store_evtchn = xen_start_info->store_evtchn;
968 
969 	return err;
970 }
971 
972 static struct notifier_block xenbus_resume_nb = {
973 	.notifier_call = xenbus_resume_cb,
974 };
975 
xenbus_late_init(int irq,void * unused)976 static irqreturn_t xenbus_late_init(int irq, void *unused)
977 {
978 	int err;
979 	uint64_t v = 0;
980 
981 	err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
982 	if (err || !v || !~v)
983 		return IRQ_HANDLED;
984 	xen_store_gfn = (unsigned long)v;
985 
986 	wake_up(&xb_waitq);
987 	return IRQ_HANDLED;
988 }
989 
xenbus_init(void)990 static int __init xenbus_init(void)
991 {
992 	int err;
993 	uint64_t v = 0;
994 	bool wait = false;
995 	xen_store_domain_type = XS_UNKNOWN;
996 
997 	if (!xen_domain())
998 		return -ENODEV;
999 
1000 	xenbus_ring_ops_init();
1001 
1002 	if (xen_pv_domain())
1003 		xen_store_domain_type = XS_PV;
1004 	if (xen_hvm_domain())
1005 	{
1006 		xen_store_domain_type = XS_HVM;
1007 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
1008 		if (err)
1009 			goto out_error;
1010 		xen_store_evtchn = (int)v;
1011 		if (!v && xen_initial_domain())
1012 			xen_store_domain_type = XS_LOCAL;
1013 	}
1014 	if (xen_pv_domain() && !xen_start_info->store_evtchn)
1015 		xen_store_domain_type = XS_LOCAL;
1016 	if (xen_pv_domain() && xen_start_info->store_evtchn)
1017 		xenstored_ready = 1;
1018 
1019 	switch (xen_store_domain_type) {
1020 	case XS_LOCAL:
1021 		err = xenstored_local_init();
1022 		if (err)
1023 			goto out_error;
1024 		xen_store_interface = gfn_to_virt(xen_store_gfn);
1025 		break;
1026 	case XS_PV:
1027 		xen_store_evtchn = xen_start_info->store_evtchn;
1028 		xen_store_gfn = xen_start_info->store_mfn;
1029 		xen_store_interface = gfn_to_virt(xen_store_gfn);
1030 		break;
1031 	case XS_HVM:
1032 		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
1033 		if (err)
1034 			goto out_error;
1035 		/*
1036 		 * Uninitialized hvm_params are zero and return no error.
1037 		 * Although it is theoretically possible to have
1038 		 * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
1039 		 * not zero when valid. If zero, it means that Xenstore hasn't
1040 		 * been properly initialized. Instead of attempting to map a
1041 		 * wrong guest physical address return error.
1042 		 *
1043 		 * Also recognize all bits set as an invalid/uninitialized value.
1044 		 */
1045 		if (!v) {
1046 			err = -ENOENT;
1047 			goto out_error;
1048 		}
1049 		if (v == ~0ULL) {
1050 			wait = true;
1051 		} else {
1052 			/* Avoid truncation on 32-bit. */
1053 #if BITS_PER_LONG == 32
1054 			if (v > ULONG_MAX) {
1055 				pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
1056 				       __func__, v);
1057 				err = -EINVAL;
1058 				goto out_error;
1059 			}
1060 #endif
1061 			xen_store_gfn = (unsigned long)v;
1062 			xen_store_interface =
1063 				memremap(xen_store_gfn << XEN_PAGE_SHIFT,
1064 					 XEN_PAGE_SIZE, MEMREMAP_WB);
1065 			if (!xen_store_interface) {
1066 				pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
1067 				       __func__, v);
1068 				err = -EINVAL;
1069 				goto out_error;
1070 			}
1071 			if (xen_store_interface->connection != XENSTORE_CONNECTED)
1072 				wait = true;
1073 		}
1074 		if (wait) {
1075 			err = bind_evtchn_to_irqhandler(xen_store_evtchn,
1076 							xenbus_late_init,
1077 							0, "xenstore_late_init",
1078 							&xb_waitq);
1079 			if (err < 0) {
1080 				pr_err("xenstore_late_init couldn't bind irq err=%d\n",
1081 				       err);
1082 				goto out_error;
1083 			}
1084 
1085 			xs_init_irq = err;
1086 		}
1087 		break;
1088 	default:
1089 		pr_warn("Xenstore state unknown\n");
1090 		break;
1091 	}
1092 
1093 	/*
1094 	 * HVM domains may not have a functional callback yet. In that
1095 	 * case let xs_init() be called from xenbus_probe(), which will
1096 	 * get invoked at an appropriate time.
1097 	 */
1098 	if (xen_store_domain_type != XS_HVM) {
1099 		err = xs_init();
1100 		if (err) {
1101 			pr_warn("Error initializing xenstore comms: %i\n", err);
1102 			goto out_error;
1103 		}
1104 	}
1105 
1106 	if ((xen_store_domain_type != XS_LOCAL) &&
1107 	    (xen_store_domain_type != XS_UNKNOWN))
1108 		xen_resume_notifier_register(&xenbus_resume_nb);
1109 
1110 #ifdef CONFIG_XEN_COMPAT_XENFS
1111 	/*
1112 	 * Create xenfs mountpoint in /proc for compatibility with
1113 	 * utilities that expect to find "xenbus" under "/proc/xen".
1114 	 */
1115 	proc_create_mount_point("xen");
1116 #endif
1117 	return 0;
1118 
1119 out_error:
1120 	xen_store_domain_type = XS_UNKNOWN;
1121 	return err;
1122 }
1123 
1124 postcore_initcall(xenbus_init);
1125 
1126 MODULE_LICENSE("GPL");
1127