// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio vhost-user driver
 *
 * Copyright(c) 2019 Intel Corporation
 *
 * This driver allows virtio devices to be used over a vhost-user socket.
 *
 * Guest devices can be instantiated by kernel module or command line
 * parameters. One device will be created for each parameter. Syntax:
 *
 *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
 * where:
 *		<socket>	:= vhost-user socket path to connect
 *		<virtio_id>	:= virtio device id (as in virtio_ids.h)
 *		<platform_id>	:= (optional) platform device id
 *
 * example:
 *		virtio_uml.device=/var/uml.socket:1
 *
 * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/time-internal.h>
#include <linux/virtio-uml.h>
#include <shared/as-layout.h>
#include <irq_kern.h>
#include <init.h>
#include <os.h>
#include "vhost_user.h"

#define MAX_SUPPORTED_QUEUE_SIZE	256

#define to_virtio_uml_device(_vdev) \
	container_of(_vdev, struct virtio_uml_device, vdev)

struct virtio_uml_platform_data {
	u32 virtio_device_id;
	const char *socket_path;
	struct work_struct conn_broken_wk;
	struct platform_device *pdev;
};

struct virtio_uml_device {
	struct virtio_device vdev;
	struct platform_device *pdev;
	struct virtio_uml_platform_data *pdata;

	spinlock_t sock_lock;
	int sock, req_fd, irq;
	u64 features;
	u64 protocol_features;
	u8 status;
	u8 registered:1;
	u8 suspended:1;
	u8 no_vq_suspend:1;

	u8 config_changed_irq:1;
	uint64_t vq_irq_vq_map;
};

struct virtio_uml_vq_info {
	int kick_fd, call_fd;
	char name[32];
	bool suspended;
};

extern unsigned long long physmem_size, highmem;

#define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)

/* Vhost-user protocol */

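/*
 * Send a full message, retrying partial writes. Any file descriptors
 * ride along with the first successful chunk only, so the SCM_RIGHTS
 * payload is not duplicated by retries.
 */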
static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
			    const int *fds, unsigned int fds_num)
{
	int rc;

	do {
		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
		if (rc > 0) {
			buf += rc;
			len -= rc;
			fds = NULL;
			fds_num = 0;
		}
	} while (len && (rc >= 0 || rc == -EINTR));

	if (rc < 0)
		return rc;
	return 0;
}

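/*
 * Read exactly len bytes, retrying short reads and -EINTR; EOF is
 * reported as -ECONNRESET. If abortable, -EAGAIN ends the read early
 * instead of being retried.
 */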
static int full_read(int fd, void *buf, int len, bool abortable)
{
	int rc;

	if (!len)
		return 0;

	do {
		rc = os_read_file(fd, buf, len);
		if (rc > 0) {
			buf += rc;
			len -= rc;
		}
	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));

	if (rc < 0)
		return rc;
	if (rc == 0)
		return -ECONNRESET;
	return 0;
}

static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
{
	return full_read(fd, msg, sizeof(msg->header), true);
}

static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we must not just read() from the given fd, but also
	 * handle messages for the simulation time - this function does
	 * that for us while waiting for the given fd to become readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	if (rc == -ECONNRESET && vu_dev->registered) {
		struct virtio_uml_platform_data *pdata;

		pdata = vu_dev->pdata;

		virtio_break_device(&vu_dev->vdev);
		schedule_work(&pdata->conn_broken_wk);
	}
	if (rc)
		return rc;
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}

static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
				struct vhost_user_msg *msg,
				size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
				 max_payload_size, true);

	if (rc)
		return rc;

	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
		return -EPROTO;

	return 0;
}

static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
			       u64 *value)
{
	struct vhost_user_msg msg;
	int rc = vhost_user_recv_resp(vu_dev, &msg,
				      sizeof(msg.payload.integer));

	if (rc)
		return rc;
	if (msg.header.size != sizeof(msg.payload.integer))
		return -EPROTO;
	*value = msg.payload.integer;
	return 0;
}

static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
			       struct vhost_user_msg *msg,
			       size_t max_payload_size)
{
	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
				 max_payload_size, false);

	if (rc)
		return rc;

	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
			VHOST_USER_VERSION)
		return -EPROTO;

	return 0;
}

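/*
 * Send a request on the main socket. sock_lock serializes the send
 * and the subsequent ACK read, so replies to concurrent requests
 * cannot interleave on the stream.
 */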
214 
215 static int vhost_user_send(struct virtio_uml_device *vu_dev,
216 			   bool need_response, struct vhost_user_msg *msg,
217 			   int *fds, size_t num_fds)
218 {
219 	size_t size = sizeof(msg->header) + msg->header.size;
220 	unsigned long flags;
221 	bool request_ack;
222 	int rc;
223 
224 	msg->header.flags |= VHOST_USER_VERSION;
225 
226 	/*
227 	 * The need_response flag indicates that we already need a response,
228 	 * e.g. to read the features. In these cases, don't request an ACK as
229 	 * it is meaningless. Also request an ACK only if supported.
230 	 */
231 	request_ack = !need_response;
232 	if (!(vu_dev->protocol_features &
233 			BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
234 		request_ack = false;
235 
236 	if (request_ack)
237 		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;
238 
239 	spin_lock_irqsave(&vu_dev->sock_lock, flags);
240 	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
241 	if (rc < 0)
242 		goto out;
243 
244 	if (request_ack) {
245 		uint64_t status;
246 
247 		rc = vhost_user_recv_u64(vu_dev, &status);
248 		if (rc)
249 			goto out;
250 
251 		if (status) {
252 			vu_err(vu_dev, "slave reports error: %llu\n", status);
253 			rc = -EIO;
254 			goto out;
255 		}
256 	}
257 
258 out:
259 	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
260 	return rc;
261 }
262 
263 static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
264 				      bool need_response, u32 request)
265 {
266 	struct vhost_user_msg msg = {
267 		.header.request = request,
268 	};
269 
270 	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
271 }
272 
273 static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
274 					 u32 request, int fd)
275 {
276 	struct vhost_user_msg msg = {
277 		.header.request = request,
278 	};
279 
280 	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
281 }
282 
283 static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
284 			       u32 request, u64 value)
285 {
286 	struct vhost_user_msg msg = {
287 		.header.request = request,
288 		.header.size = sizeof(msg.payload.integer),
289 		.payload.integer = value,
290 	};
291 
292 	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
293 }
294 
295 static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
296 {
297 	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
298 }
299 
300 static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
301 				   u64 *features)
302 {
303 	int rc = vhost_user_send_no_payload(vu_dev, true,
304 					    VHOST_USER_GET_FEATURES);
305 
306 	if (rc)
307 		return rc;
308 	return vhost_user_recv_u64(vu_dev, features);
309 }
310 
311 static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
312 				   u64 features)
313 {
314 	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
315 }
316 
317 static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
318 					    u64 *protocol_features)
319 {
320 	int rc = vhost_user_send_no_payload(vu_dev, true,
321 			VHOST_USER_GET_PROTOCOL_FEATURES);
322 
323 	if (rc)
324 		return rc;
325 	return vhost_user_recv_u64(vu_dev, protocol_features);
326 }
327 
328 static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
329 					    u64 protocol_features)
330 {
331 	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
332 				   protocol_features);
333 }
334 
335 static void vhost_user_reply(struct virtio_uml_device *vu_dev,
336 			     struct vhost_user_msg *msg, int response)
337 {
338 	struct vhost_user_msg reply = {
339 		.payload.integer = response,
340 	};
341 	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
342 	int rc;
343 
344 	reply.header = msg->header;
345 	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
346 	reply.header.flags |= VHOST_USER_FLAG_REPLY;
347 	reply.header.size = sizeof(reply.payload.integer);
348 
349 	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
350 
351 	if (rc)
352 		vu_err(vu_dev,
353 		       "sending reply to slave request failed: %d (size %zu)\n",
354 		       rc, size);
355 }
356 
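/*
 * Handle one request arriving from the device on the slave channel
 * (config change or vring call); anything else just gets a non-zero
 * response if the device asked for a reply.
 */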
static irqreturn_t vu_req_read_message(struct virtio_uml_device *vu_dev,
				       struct time_travel_event *ev)
{
	struct virtqueue *vq;
	int response = 1;
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];
	} msg;
	int rc;

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));

	if (rc)
		return IRQ_NONE;

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		vu_dev->config_changed_irq = true;
		response = 0;
		break;
	case VHOST_USER_SLAVE_VRING_CALL:
		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vq->index == msg.msg.payload.vring_state.index) {
				response = 0;
				vu_dev->vq_irq_vq_map |= BIT_ULL(vq->index);
				break;
			}
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);
	}

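	/*
	 * In time-travel mode the above only recorded what happened;
	 * scheduling the IRQ event here lets vu_req_interrupt() run and
	 * deliver it at the right point in simulated time.
	 */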
	if (ev && !vu_dev->suspended)
		time_travel_add_irq_event(ev);

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);

	return IRQ_HANDLED;
}

static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	irqreturn_t ret = IRQ_HANDLED;

	if (!um_irq_timetravel_handler_used())
		ret = vu_req_read_message(vu_dev, NULL);

	if (vu_dev->vq_irq_vq_map) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vu_dev->vq_irq_vq_map & BIT_ULL(vq->index))
				vring_interrupt(0 /* ignored */, vq);
		}
		vu_dev->vq_irq_vq_map = 0;
	} else if (vu_dev->config_changed_irq) {
		virtio_config_changed(&vu_dev->vdev);
		vu_dev->config_changed_irq = false;
	}

	return ret;
}

static void vu_req_interrupt_comm_handler(int irq, int fd, void *data,
					  struct time_travel_event *ev)
{
	vu_req_read_message(data, ev);
}

static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq_tt(UM_IRQ_ALLOC, vu_dev->req_fd, IRQ_READ,
			       vu_req_interrupt, IRQF_SHARED,
			       vu_dev->pdev->name, vu_dev,
			       vu_req_interrupt_comm_handler);
	if (rc < 0)
		goto err_close;

	vu_dev->irq = rc;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(vu_dev->irq, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}

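/*
 * Initial handshake with the device, in the usual vhost-user order:
 * SET_OWNER, GET_FEATURES, then - if VHOST_USER_F_PROTOCOL_FEATURES
 * was offered - negotiate protocol features and set up the slave
 * request channel.
 */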
static int vhost_user_init(struct virtio_uml_device *vu_dev)
{
	int rc = vhost_user_set_owner(vu_dev);

	if (rc)
		return rc;
	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
	if (rc)
		return rc;

	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
		rc = vhost_user_get_protocol_features(vu_dev,
				&vu_dev->protocol_features);
		if (rc)
			return rc;
		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
		rc = vhost_user_set_protocol_features(vu_dev,
				vu_dev->protocol_features);
		if (rc)
			return rc;
	}

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		rc = vhost_user_init_slave_req(vu_dev);
		if (rc)
			return rc;
	}

	return 0;
}

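/*
 * Config space accessors. Both are silently skipped when the device
 * did not negotiate VHOST_USER_PROTOCOL_F_CONFIG; note that the GET
 * request always starts at offset 0 and the requested window is
 * copied out of the returned blob.
 */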
static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
				  u32 offset, void *buf, u32 len)
{
	u32 cfg_size = offset + len;
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_GET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = 0;
	msg->payload.config.size = cfg_size;

	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
	if (rc) {
		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
		       rc);
		goto free;
	}

	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
	if (rc) {
		vu_err(vu_dev,
		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
		       rc);
		goto free;
	}

	if (msg->header.size != payload_size ||
	    msg->payload.config.size != cfg_size) {
		rc = -EPROTO;
		vu_err(vu_dev,
		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
		       msg->header.size, payload_size,
		       msg->payload.config.size, cfg_size);
		goto free;
	}
	memcpy(buf, msg->payload.config.payload + offset, len);

free:
	kfree(msg);
}

static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
				  u32 offset, const void *buf, u32 len)
{
	struct vhost_user_msg *msg;
	size_t payload_size = sizeof(msg->payload.config) + len;
	size_t msg_size = sizeof(msg->header) + payload_size;
	int rc;

	if (!(vu_dev->protocol_features &
	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
		return;

	msg = kzalloc(msg_size, GFP_KERNEL);
	if (!msg)
		return;
	msg->header.request = VHOST_USER_SET_CONFIG;
	msg->header.size = payload_size;
	msg->payload.config.offset = offset;
	msg->payload.config.size = len;
	memcpy(msg->payload.config.payload, buf, len);

	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
	if (rc)
		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
		       rc);

	kfree(msg);
}

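/*
 * Translate a guest-physical range into the (fd, mmap offset) form of
 * a vhost-user memory region. Both ends of the range must resolve to
 * the same backing mapping for the region to be usable.
 */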
static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
				      struct vhost_user_mem_region *region_out)
{
	unsigned long long mem_offset;
	int rc = phys_mapping(addr, &mem_offset);

	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
		return -EFAULT;
	*fd_out = rc;
	region_out->guest_addr = addr;
	region_out->user_addr = addr;
	region_out->size = size;
	region_out->mmap_offset = mem_offset;

	/* Ensure mapping is valid for the entire region */
	rc = phys_mapping(addr + size - 1, &mem_offset);
	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
		 addr + size - 1, rc, *fd_out))
		return -EFAULT;
	return 0;
}

static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_MEM_TABLE,
		.header.size = sizeof(msg.payload.mem_regions),
		.payload.mem_regions.num = 1,
	};
	unsigned long reserved = uml_reserved - uml_physmem;
	int fds[2];
	int rc;

	/*
	 * This is a bit tricky, see also the comment with setup_physmem().
	 *
	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
	 * but the code and data we *already* have is omitted. To us, this
	 * makes no difference, since they both become part of our address
	 * space and memory consumption. To somebody looking in from the
	 * outside, however, it is different because the part of our memory
	 * consumption that's already part of the binary (code/data) is not
	 * mapped from the file, so it's not visible to another mmap from
	 * the file descriptor.
	 *
	 * Thus, don't advertise this space to the vhost-user slave. This
	 * means that the slave will likely abort or similar when we give
	 * it an address from the hidden range, since it's not marked as
	 * a valid address, but at least that way we detect the issue and
	 * don't just have the slave read an all-zeroes buffer from the
	 * shared memory file, or write something there that we can never
	 * see (depending on the direction of the virtqueue traffic.)
	 *
	 * Since we usually don't want to use .text for virtio buffers,
	 * this effectively means that you cannot use
	 *  1) global variables, which are in the .bss and not in the shm
	 *     file-backed memory
	 *  2) the stack in some processes, depending on where they have
	 *     their stack (or maybe only no interrupt stack?)
	 *
	 * The stack is already not typically valid for DMA, so this isn't
	 * much of a restriction, but global variables might be encountered.
	 *
	 * It might be possible to fix it by copying around the data that's
	 * between bss_start and where we map the file now, but it's not
	 * something that you typically encounter with virtio drivers, so
	 * it didn't seem worthwhile.
	 */
	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
					&fds[0],
					&msg.payload.mem_regions.regions[0]);

	if (rc < 0)
		return rc;
	if (highmem) {
		msg.payload.mem_regions.num++;
		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
				&fds[1], &msg.payload.mem_regions.regions[1]);
		if (rc < 0)
			return rc;
	}

	return vhost_user_send(vu_dev, false, &msg, fds,
			       msg.payload.mem_regions.num);
}

static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
				      u32 request, u32 index, u32 num)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.vring_state),
		.payload.vring_state.index = index,
		.payload.vring_state.num = num,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
				    u32 index, u32 num)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
					  index, num);
}

static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
				     u32 index, u32 offset)
{
	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
					  index, offset);
}

static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
				     u32 index, u64 desc, u64 used, u64 avail,
				     u64 log)
{
	struct vhost_user_msg msg = {
		.header.request = VHOST_USER_SET_VRING_ADDR,
		.header.size = sizeof(msg.payload.vring_addr),
		.payload.vring_addr.index = index,
		.payload.vring_addr.desc = desc,
		.payload.vring_addr.used = used,
		.payload.vring_addr.avail = avail,
		.payload.vring_addr.log = log,
	};

	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
}

static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
				   u32 request, int index, int fd)
{
	struct vhost_user_msg msg = {
		.header.request = request,
		.header.size = sizeof(msg.payload.integer),
		.payload.integer = index,
	};

	if (index & ~VHOST_USER_VRING_INDEX_MASK)
		return -EINVAL;
	if (fd < 0) {
		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
	}
	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
}

static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
				       index, fd);
}

static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
				     int index, int fd)
{
	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
				       index, fd);
}

static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
				       u32 index, bool enable)
{
	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
		return 0;

	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
					  index, enable);
}


/* Virtio interface */

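/*
 * Kick the device: either by writing to the per-queue eventfd or, if
 * in-band notifications were negotiated (no kick fd), by sending
 * VHOST_USER_VRING_KICK on the socket.
 */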
static bool vu_notify(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	const uint64_t n = 1;
	int rc;

	if (info->suspended)
		return true;

	time_travel_propagate_time();

	if (info->kick_fd < 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
						  vq->index, 0) == 0;
	}

	do {
		rc = os_write_file(info->kick_fd, &n, sizeof(n));
	} while (rc == -EINTR);
	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
}

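/* Drain the call fd completely, handing each event to the vring */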
static irqreturn_t vu_interrupt(int irq, void *opaque)
{
	struct virtqueue *vq = opaque;
	struct virtio_uml_vq_info *info = vq->priv;
	uint64_t n;
	int rc;
	irqreturn_t ret = IRQ_NONE;

	do {
		rc = os_read_file(info->call_fd, &n, sizeof(n));
		if (rc == sizeof(n))
			ret |= vring_interrupt(irq, vq);
	} while (rc == sizeof(n) || rc == -EINTR);
	WARN(rc != -EAGAIN, "read returned %d\n", rc);
	return ret;
}


static void vu_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_get_config(vu_dev, offset, buf, len);
}

static void vu_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vhost_user_set_config(vu_dev, offset, buf, len);
}

static u8 vu_get_status(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->status;
}

static void vu_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = status;
}

static void vu_reset(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	vu_dev->status = 0;
}

static void vu_del_vq(struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;

	if (info->call_fd >= 0) {
		struct virtio_uml_device *vu_dev;

		vu_dev = to_virtio_uml_device(vq->vdev);

		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}

	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);

	vring_del_virtqueue(vq);
	kfree(info);
}

static void vu_del_vqs(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct virtqueue *vq, *n;
	u64 features;

	/* Note: reverse order as a workaround to a decoding bug in snabb */
	list_for_each_entry_reverse(vq, &vdev->vqs, list)
		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));

	/* Ensure previous messages have been processed */
	WARN_ON(vhost_user_get_features(vu_dev, &features));

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vu_del_vq(vq);
}

static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
			       struct virtqueue *vq)
{
	struct virtio_uml_vq_info *info = vq->priv;
	int call_fds[2];
	int rc;

	/* no call FD needed/desired in this case */
	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
	    vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
		info->call_fd = -1;
		return 0;
	}

	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
	rc = os_pipe(call_fds, true, true);
	if (rc < 0)
		return rc;

	info->call_fd = call_fds[0];
	rc = um_request_irq(vu_dev->irq, info->call_fd, IRQ_READ,
			    vu_interrupt, IRQF_SHARED, info->name, vq);
	if (rc < 0)
		goto close_both;

	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
	if (rc)
		goto release_irq;

	goto out;

release_irq:
	um_free_irq(vu_dev->irq, vq);
close_both:
	os_close_file(call_fds[0]);
out:
	/* Close (unused) write end of call fds */
	os_close_file(call_fds[1]);

	return rc;
}

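/*
 * Create one virtqueue: allocate the ring (the allocator may reduce
 * the size below MAX_SUPPORTED_QUEUE_SIZE), set up kick/call fds as
 * negotiated, and program size, base and addresses into the device.
 */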
static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
				     unsigned index, vq_callback_t *callback,
				     const char *name, bool ctx)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	struct platform_device *pdev = vu_dev->pdev;
	struct virtio_uml_vq_info *info;
	struct virtqueue *vq;
	int num = MAX_SUPPORTED_QUEUE_SIZE;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto error_kzalloc;
	}
	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
		 pdev->id, name);

	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
				    ctx, vu_notify, callback, info->name);
	if (!vq) {
		rc = -ENOMEM;
		goto error_create;
	}
	vq->priv = info;
	num = virtqueue_get_vring_size(vq);

	if (vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
		info->kick_fd = -1;
	} else {
		rc = os_eventfd(0, 0);
		if (rc < 0)
			goto error_kick;
		info->kick_fd = rc;
	}

	rc = vu_setup_vq_call_fd(vu_dev, vq);
	if (rc)
		goto error_call;

	rc = vhost_user_set_vring_num(vu_dev, index, num);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_base(vu_dev, index, 0);
	if (rc)
		goto error_setup;

	rc = vhost_user_set_vring_addr(vu_dev, index,
				       virtqueue_get_desc_addr(vq),
				       virtqueue_get_used_addr(vq),
				       virtqueue_get_avail_addr(vq),
				       (u64) -1);
	if (rc)
		goto error_setup;

	return vq;

error_setup:
	if (info->call_fd >= 0) {
		um_free_irq(vu_dev->irq, vq);
		os_close_file(info->call_fd);
	}
error_call:
	if (info->kick_fd >= 0)
		os_close_file(info->kick_fd);
error_kick:
	vring_del_virtqueue(vq);
error_create:
	kfree(info);
error_kzalloc:
	return ERR_PTR(rc);
}

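/*
 * The 64-queue limit below matches vq_irq_vq_map, which tracks
 * pending per-queue interrupts in a single u64 bitmap.
 */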
static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
		       const char * const names[], const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	int i, queue_idx = 0, rc;
	struct virtqueue *vq;

	/* not supported for now */
	if (WARN_ON(nvqs > 64))
		return -EINVAL;

	rc = vhost_user_set_mem_table(vu_dev);
	if (rc)
		return rc;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			rc = PTR_ERR(vqs[i]);
			goto error_setup;
		}
	}

	list_for_each_entry(vq, &vdev->vqs, list) {
		struct virtio_uml_vq_info *info = vq->priv;

		if (info->kick_fd >= 0) {
			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
						       info->kick_fd);
			if (rc)
				goto error_setup;
		}

		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
		if (rc)
			goto error_setup;
	}

	return 0;

error_setup:
	vu_del_vqs(vdev);
	return rc;
}

static u64 vu_get_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->features;
}

static int vu_finalize_features(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;

	vring_transport_features(vdev);
	vu_dev->features = vdev->features | supported;

	return vhost_user_set_features(vu_dev, vu_dev->features);
}

static const char *vu_bus_name(struct virtio_device *vdev)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	return vu_dev->pdev->name;
}

static const struct virtio_config_ops virtio_uml_config_ops = {
	.get = vu_get,
	.set = vu_set,
	.get_status = vu_get_status,
	.set_status = vu_set_status,
	.reset = vu_reset,
	.find_vqs = vu_find_vqs,
	.del_vqs = vu_del_vqs,
	.get_features = vu_get_features,
	.finalize_features = vu_finalize_features,
	.bus_name = vu_bus_name,
};

static void virtio_uml_release_dev(struct device *d)
{
	struct virtio_device *vdev =
			container_of(d, struct virtio_device, dev);
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	time_travel_propagate_time();

	/* might not have been opened due to not negotiating the feature */
	if (vu_dev->req_fd >= 0) {
		um_free_irq(vu_dev->irq, vu_dev);
		os_close_file(vu_dev->req_fd);
	}

	os_close_file(vu_dev->sock);
	kfree(vu_dev);
}

void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev,
				  bool no_vq_suspend)
{
	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);

	if (WARN_ON(vdev->config != &virtio_uml_config_ops))
		return;

	vu_dev->no_vq_suspend = no_vq_suspend;
	dev_info(&vdev->dev, "%sabled VQ suspend\n",
		 no_vq_suspend ? "dis" : "en");
}

static void vu_of_conn_broken(struct work_struct *wk)
{
	/*
	 * We can't remove the device from the devicetree so the only thing we
	 * can do is warn.
	 */
	WARN_ON(1);
}

/* Platform device */

static struct virtio_uml_platform_data *
virtio_uml_create_pdata(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct virtio_uml_platform_data *pdata;
	int ret;

	if (!np)
		return ERR_PTR(-EINVAL);

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&pdata->conn_broken_wk, vu_of_conn_broken);
	pdata->pdev = pdev;

	ret = of_property_read_string(np, "socket-path", &pdata->socket_path);
	if (ret)
		return ERR_PTR(ret);

	ret = of_property_read_u32(np, "virtio-device-id",
				   &pdata->virtio_device_id);
	if (ret)
		return ERR_PTR(ret);

	return pdata;
}

static int virtio_uml_probe(struct platform_device *pdev)
{
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	struct virtio_uml_device *vu_dev;
	int rc;

	if (!pdata) {
		pdata = virtio_uml_create_pdata(pdev);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
	}

	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
	if (!vu_dev)
		return -ENOMEM;

	vu_dev->pdata = pdata;
	vu_dev->vdev.dev.parent = &pdev->dev;
	vu_dev->vdev.dev.release = virtio_uml_release_dev;
	vu_dev->vdev.config = &virtio_uml_config_ops;
	vu_dev->vdev.id.device = pdata->virtio_device_id;
	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
	vu_dev->pdev = pdev;
	vu_dev->req_fd = -1;

	time_travel_propagate_time();

	do {
		rc = os_connect_socket(pdata->socket_path);
	} while (rc == -EINTR);
	if (rc < 0)
		goto error_free;
	vu_dev->sock = rc;

	spin_lock_init(&vu_dev->sock_lock);

	rc = vhost_user_init(vu_dev);
	if (rc)
		goto error_init;

	platform_set_drvdata(pdev, vu_dev);

	device_set_wakeup_capable(&vu_dev->vdev.dev, true);

	rc = register_virtio_device(&vu_dev->vdev);
	if (rc) {
		/* the release callback frees vu_dev, don't touch it after */
		put_device(&vu_dev->vdev.dev);
		return rc;
	}
	vu_dev->registered = 1;
	return 0;

error_init:
	os_close_file(vu_dev->sock);
error_free:
	kfree(vu_dev);
	return rc;
}

static int virtio_uml_remove(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vu_dev->vdev);
	return 0;
}

/* Command line device list */

static void vu_cmdline_release_dev(struct device *d)
{
}

static struct device vu_cmdline_parent = {
	.init_name = "virtio-uml-cmdline",
	.release = vu_cmdline_release_dev,
};

static bool vu_cmdline_parent_registered;
static int vu_cmdline_id;

static int vu_unregister_cmdline_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;

	kfree(pdata->socket_path);
	platform_device_unregister(pdev);
	return 0;
}

static void vu_conn_broken(struct work_struct *wk)
{
	struct virtio_uml_platform_data *pdata;

	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
}

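/*
 * Parse one "<socket>:<virtio_id>[:<platform_id>]" parameter and
 * register a matching platform device; without an explicit id, a
 * monotonically increasing one is used.
 */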
static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
{
	const char *ids = strchr(device, ':');
	unsigned int virtio_device_id;
	int processed, consumed, err;
	char *socket_path;
	struct virtio_uml_platform_data pdata, *ppdata;
	struct platform_device *pdev;

	if (!ids || ids == device)
		return -EINVAL;

	processed = sscanf(ids, ":%u%n:%d%n",
			   &virtio_device_id, &consumed,
			   &vu_cmdline_id, &consumed);

	if (processed < 1 || ids[consumed])
		return -EINVAL;

	if (!vu_cmdline_parent_registered) {
		err = device_register(&vu_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			put_device(&vu_cmdline_parent);
			return err;
		}
		vu_cmdline_parent_registered = true;
	}

	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
	if (!socket_path)
		return -ENOMEM;

	pdata.virtio_device_id = (u32) virtio_device_id;
	pdata.socket_path = socket_path;

	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
		vu_cmdline_id, virtio_device_id, socket_path);

	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
					     vu_cmdline_id++, &pdata,
					     sizeof(pdata));
	err = PTR_ERR_OR_ZERO(pdev);
	if (err)
		goto free;

	ppdata = pdev->dev.platform_data;
	ppdata->pdev = pdev;
	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);

	return 0;

free:
	kfree(socket_path);
	return err;
}

static int vu_cmdline_get_device(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
	char *buffer = data;
	unsigned int len = strlen(buffer);

	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
	return 0;
}

static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	if (vu_cmdline_parent_registered)
		device_for_each_child(&vu_cmdline_parent, buffer,
				      vu_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vu_cmdline_param_ops = {
	.set = vu_cmdline_set,
	.get = vu_cmdline_get,
};

device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
__uml_help(vu_cmdline_param_ops,
"virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
"    Configure a virtio device over a vhost-user socket.\n"
"    See virtio_ids.h for a list of possible virtio device id values.\n"
"    Optionally use a specific platform_device id.\n\n"
);


static void vu_unregister_cmdline_devices(void)
{
	if (vu_cmdline_parent_registered) {
		device_for_each_child(&vu_cmdline_parent, NULL,
				      vu_unregister_cmdline_device);
		device_unregister(&vu_cmdline_parent);
		vu_cmdline_parent_registered = false;
	}
}

/* Platform driver */

static const struct of_device_id virtio_uml_match[] = {
	{ .compatible = "virtio,uml", },
	{ }
};
MODULE_DEVICE_TABLE(of, virtio_uml_match);

static int virtio_uml_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = true;
			vhost_user_set_vring_enable(vu_dev, vq->index, false);
		}
	}

	if (!device_may_wakeup(&vu_dev->vdev.dev)) {
		vu_dev->suspended = true;
		return 0;
	}

	return irq_set_irq_wake(vu_dev->irq, 1);
}

static int virtio_uml_resume(struct platform_device *pdev)
{
	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);

	if (!vu_dev->no_vq_suspend) {
		struct virtqueue *vq;

		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			struct virtio_uml_vq_info *info = vq->priv;

			info->suspended = false;
			vhost_user_set_vring_enable(vu_dev, vq->index, true);
		}
	}

	vu_dev->suspended = false;

	if (!device_may_wakeup(&vu_dev->vdev.dev))
		return 0;

	return irq_set_irq_wake(vu_dev->irq, 0);
}

static struct platform_driver virtio_uml_driver = {
	.probe = virtio_uml_probe,
	.remove = virtio_uml_remove,
	.driver = {
		.name = "virtio-uml",
		.of_match_table = virtio_uml_match,
	},
	.suspend = virtio_uml_suspend,
	.resume = virtio_uml_resume,
};

static int __init virtio_uml_init(void)
{
	return platform_driver_register(&virtio_uml_driver);
}

static void __exit virtio_uml_exit(void)
{
	platform_driver_unregister(&virtio_uml_driver);
	vu_unregister_cmdline_devices();
}

module_init(virtio_uml_init);
module_exit(virtio_uml_exit);
__uml_exitcall(virtio_uml_exit);

MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
MODULE_LICENSE("GPL");