// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module parameters or on the
 * kernel command line. One device will be created for each parameter.
 * Syntax:
 *
 *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *		<socket>	:= vhost-user socket path to connect
 *		<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *		<platform_id>	:= (optional) platform device id
 *
 * example:
 *		virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
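/*
 * Alternatively, devices can be probed from a devicetree via the
 * "virtio,uml" compatible (see virtio_uml_create_pdata() below).
 * A minimal sketch of such a node, with illustrative values only:
 *
 *	virtio@0 {
 *		compatible = "virtio,uml";
 *		socket-path = "/var/uml.socket";
 *		virtio-device-id = <1>;
 *	};
 */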
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;
	struct virtio_uml_platform_data *pdata;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
	int recv_rc;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */

static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

static void vhost_user_check_reset(struct virtio_uml_device *vu_dev,
				   int rc)
{
	struct virtio_uml_platform_data *pdata = vu_dev->pdata;

	if (rc != -ECONNRESET)
		return;

	if (!vu_dev->registered)
		return;

	vu_dev->registered = 0;

	schedule_work(&pdata->conn_broken_wk);
}

static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc) {
		vhost_user_check_reset(vu_dev, rc);
		return rc;
	}

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}

static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}
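/*
 * Sketch of the REPLY_ACK round trip vhost_user_send() performs when the
 * slave negotiated VHOST_USER_PROTOCOL_F_REPLY_ACK (header fields
 * abbreviated, see vhost_user.h for the real layout):
 *
 *	master -> slave: { request, VERSION | NEED_REPLY, size, payload }
 *	slave -> master: { request, VERSION | REPLY, 8, (u64) status }
 *
 * A non-zero status is turned into -EIO for the caller.
 */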
271 
272 static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
273 				      bool need_response, u32 request)
274 {
275 	struct vhost_user_msg msg = {
276 		.header.request = request,
277 	};
278 
279 	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
280 }
281 
282 static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
283 					 u32 request, int fd)
284 {
285 	struct vhost_user_msg msg = {
286 		.header.request = request,
287 	};
288 
289 	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
290 }
291 
292 static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
293 			       u32 request, u64 value)
294 {
295 	struct vhost_user_msg msg = {
296 		.header.request = request,
297 		.header.size = sizeof(msg.payload.integer),
298 		.payload.integer = value,
299 	};
300 
301 	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
302 }
303 
304 static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
305 {
306 	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
307 }
308 
309 static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
310 				   u64 *features)
311 {
312 	int rc = vhost_user_send_no_payload(vu_dev, true,
313 					    VHOST_USER_GET_FEATURES);
314 
315 	if (rc)
316 		return rc;
317 	return vhost_user_recv_u64(vu_dev, features);
318 }
319 
320 static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
321 				   u64 features)
322 {
323 	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
324 }
325 
326 static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
327 					    u64 *protocol_features)
328 {
329 	int rc = vhost_user_send_no_payload(vu_dev, true,
330 			VHOST_USER_GET_PROTOCOL_FEATURES);
331 
332 	if (rc)
333 		return rc;
334 	return vhost_user_recv_u64(vu_dev, protocol_features);
335 }
336 
337 static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
338 					    u64 protocol_features)
339 {
340 	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
341 				   protocol_features);
342 }
343 
344 static void vhost_user_reply(struct virtio_uml_device *vu_dev,
345 			     struct vhost_user_msg *msg, int response)
346 {
347 	struct vhost_user_msg reply = {
348 		.payload.integer = response,
349 	};
350 	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
351 	int rc;
352 
353 	reply.header = msg->header;
354 	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
355 	reply.header.flags |= VHOST_USER_FLAG_REPLY;
356 	reply.header.size = sizeof(reply.payload.integer);
357 
358 	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
359 
360 	if (rc)
361 		vu_err(vu_dev,
362 		       "sending reply to slave request failed: %d (size %zu)\n",
363 		       rc, size);
364 }
365 
366 static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
367 				       struct time_travel_event *ev)
368 {
369 	struct virtqueue *vq;
370 	int response = 1;
371 	struct {
372 		struct vhost_user_msg msg;
373 		u8 extra_payload[512];
374 	} msg;
375 	int rc;
376 	irqreturn_t irq_rc = IRQ_NONE;
377 
378 	while (1) {
379 		rc = vhost_user_recv_req(vu_dev, &msg.msg,
380 					 sizeof(msg.msg.payload) +
381 					 sizeof(msg.extra_payload));
382 		if (rc)
383 			break;
384 
385 		switch (msg.msg.header.request) {
386 		case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
387 			vu_dev->config_changed_irq = true;
388 			response = 0;
389 			break;
390 		case VHOST_USER_SLAVE_VRING_CALL:
391 			virtio_device_for_each_vq((&vu_dev->vdev), vq) {
392 				if (vq->index == msg.msg.payload.vring_state.index) {
393 					response = 0;
394 					vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
395 					break;
396 				}
397 			}
398 			break;
399 		case VHOST_USER_SLAVE_IOTLB_MSG:
400 			/* not supported - VIRTIO_F_ACCESS_PLATFORM */
401 		case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
402 			/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
403 		default:
404 			vu_err(vu_dev, "unexpected slave request %d\n",
405 			       msg.msg.header.request);
406 		}
407 
408 		if (ev && !vu_dev->suspended)
409 			time_travel_add_irq_event(ev);
410 
411 		if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
412 			vhost_user_reply(vu_dev, &msg.msg, response);
413 		irq_rc = IRQ_HANDLED;
414 	}
415 	/* mask EAGAIN as we try non-blocking read until socket is empty */
416 	vu_dev->recv_rc = (rc == -EAGAIN) ? 0 : rc;
417 	return irq_rc;
418 }
419 
420 static irqreturn_t vu_req_interrupt(int irq, void *data)
421 {
422 	struct virtio_uml_device *vu_dev = data;
423 	irqreturn_t ret = IRQ_HANDLED;
424 
425 	if (!um_irq_timetravel_handler_used())
426 		ret = vu_req_read_message(vu_dev, NULL);
427 
428 	if (vu_dev->recv_rc) {
429 		vhost_user_check_reset(vu_dev, vu_dev->recv_rc);
430 	} else if (vu_dev->vq_irq_vq_map) {
431 		struct virtqueue *vq;
432 
433 		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
434 			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
435 				vring_interrupt(0 /* ignored */, vq);
436 		}
437 		vu_dev->vq_irq_vq_map = 0;
438 	} else if (vu_dev->config_changed_irq) {
439 		virtio_config_changed(&vu_dev->vdev);
440 		vu_dev->config_changed_irq = false;
441 	}
442 
443 	return ret;
444 }
445 
446 static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
447 					  struct time_travel_event *ev)
448 {
449 	vu_req_read_message(data, ev);
450 }
451 
452 static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
453 {
454 	int rc, req_fds[2];
455 
456 	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
457 	rc = os_pipe(req_fds, true, true);
458 	if (rc < 0)
459 		return rc;
460 	vu_dev->req_fd = req_fds[0];
461 
462 	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
463 			       vu_req_interrupt, IRQF_SHARED,
464 			       vu_dev->pdev->name, vu_dev,
465 			       vu_req_interrupt_comm_handler);
466 	if (rc < 0)
467 		goto err_close;
468 
469 	vu_dev->irq = rc;
470 
471 	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
472 					   req_fds[1]);
473 	if (rc)
474 		goto err_free_irq;
475 
476 	goto out;
477 
478 err_free_irq:
479 	um_free_irq(vu_dev->irq, vu_dev);
480 err_close:
481 	os_close_file(req_fds[0]);
482 out:
483 	/* Close unused write end of request fds */
484 	os_close_file(req_fds[1]);
485 	return rc;
486 }
487 
488 static int vhost_user_init(struct virtio_uml_device *vu_dev)
489 {
490 	int rc = vhost_user_set_owner(vu_dev);
491 
492 	if (rc)
493 		return rc;
494 	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
495 	if (rc)
496 		return rc;
497 
498 	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
499 		rc = vhost_user_get_protocol_features(vu_dev,
500 				&vu_dev->protocol_features);
501 		if (rc)
502 			return rc;
503 		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
504 		rc = vhost_user_set_protocol_features(vu_dev,
505 				vu_dev->protocol_features);
506 		if (rc)
507 			return rc;
508 	}
509 
510 	if (vu_dev->protocol_features &
511 			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
512 		rc = vhost_user_init_slave_req(vu_dev);
513 		if (rc)
514 			return rc;
515 	}
516 
517 	return 0;
518 }
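/*
 * For reference, the startup handshake performed above, in order (a
 * sketch of the message names only, no additional traffic is implied):
 *
 *	VHOST_USER_SET_OWNER
 *	VHOST_USER_GET_FEATURES
 *	VHOST_USER_GET/SET_PROTOCOL_FEATURES	(if VHOST_USER_F_PROTOCOL_FEATURES)
 *	VHOST_USER_SET_SLAVE_REQ_FD		(if VHOST_USER_PROTOCOL_F_SLAVE_REQ)
 */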

static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}
	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}

static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;
	return 0;
}

static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * is no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
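	/*
	 * A hypothetical example of what this rules out: a driver doing
	 *
	 *	static u8 bounce[64];			// lands in .bss
	 *	sg_init_one(&sg, bounce, sizeof(bounce));
	 *
	 * would hand the slave an address in the hidden range below
	 * 'reserved', which the slave will reject (or worse); a
	 * kmalloc()ed buffer is fine, since allocator memory comes from
	 * the file-backed region advertised below.
	 */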
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);

	if (rc < 0)
		return rc;

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}

static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}


/* Virtio interface */

static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}


static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}

static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	vq->num_max = num;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}

static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       struct virtqueue_info vqs_info[],
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		struct virtqueue_info *vqi = &vqs_info[i];

		if (!vqi->name) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, vqi->callback,
				     vqi->name, vqi->ctx);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}
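/*
 * Taken together, vu_find_vqs()/vu_setup_vq() emit the following
 * vhost-user sequence (a sketch): VHOST_USER_SET_MEM_TABLE once, then
 * per queue VHOST_USER_SET_VRING_CALL, _NUM, _BASE and _ADDR, and
 * finally VHOST_USER_SET_VRING_KICK plus VHOST_USER_SET_VRING_ENABLE
 * for each queue once all of them have been created.
 */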

static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};

static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%sabled VQ suspend\n",
		 no_vq_suspend ? "dis" : "en");
}
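/*
 * A hedged usage sketch: a UML virtio driver that needs its rings to
 * stay live across suspend (e.g. because the device delivers wakeup
 * events) can opt out once it holds the struct virtio_device:
 *
 *	virtio_uml_set_no_vq_suspend(vdev, true);
 *
 * The declaration lives in <linux/virtio-uml.h>, included above.
 */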

static void vu_of_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	virtio_break_device(&vu_dev->vdev);

	/*
	 * We can't remove the device from the devicetree so the only thing we
	 * can do is warn.
	 */
	WARN_ON(1);
}

/* Platform device */

static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct virtio_uml_platform_data *pdata;
	int ret;

	if (!np)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
	pdata->pdev = pdev;

	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
	if (ret)
		return ERR_PTR(ret);

	ret = of_property_read_u32(np, "virtio-device-id",
				   &pdata->virtio_device_id);
	if (ret)
		return ERR_PTR(ret);

	return pdata;
}

static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata) {
		pdata = virtio_uml_create_pdata(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->pdata = pdata;
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	time_travel_propagate_time();

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc)
		put_device(&vu_dev->vdev.dev);
	vu_dev->registered = 1;
	return rc;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}

static void virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;
	struct virtio_uml_device *vu_dev;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);

	vu_dev = platform_get_drvdata(pdata->pdev);

	virtio_break_device(&vu_dev->vdev);

	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}

static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;
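	/*
	 * Illustrative inputs: for "/path/sock:9", ids points at ":9" and
	 * only the virtio id is parsed (processed == 1); for
	 * "/path/sock:9:2", the trailing ":2" also selects platform
	 * device id 2 (processed == 2). Trailing junk after the last
	 * conversion leaves ids[consumed] non-NUL and rejects the string.
	 */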

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);
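/*
 * Devices configured this way can be read back at runtime (root only,
 * given S_IRUSR above); a hypothetical session:
 *
 *	# cat /sys/module/virtio_uml/parameters/device
 *	/var/uml.socket:1:0
 *
 * with one "<socket>:<virtio_id>:<platform_id>" line per device, as
 * formatted by vu_cmdline_get_device() above.
 */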

static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove_new = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");