Lines Matching +full:host +full:- +full:port

1 // SPDX-License-Identifier: GPL-2.0-or-later
28 #include <linux/dma-mapping.h>
43 /* Used for exporting per-port information to debugfs */
56 .name = "virtio-ports",
67 /* The hvc device associated with this console port */
113 * This is a per-device struct that stores data common to all the
114 * ports for that device (vdev->priv).
144 * guest->host transfers, one for host->guest transfers
149 * A control packet buffer for guest->host requests, protected
154 /* Array of per-port IO virtqueues */
165 /* This struct holds the per-port data */
166 struct port {
167 /* Next port in the list, head is in the ports_device */
178 * port. Has to be a spinlock because it can be called from
186 /* The IO vqs for this port */
189 /* File in the debugfs directory that exposes this port's information */
194 * this port for accounting and debugging purposes. These
195 * counts are not reset across port open / close events.
200 * The entries in this struct will be valid if this port is
205 /* Each port associates with a separate char device */
209 /* Reference-counting to handle port hot-unplugs and file operations */
215 /* The 'name' of the port that we expose via sysfs properties */
218 /* We can notify apps of host connect / disconnect events via SIGIO */
221 /* The 'id' to identify the port with the Host */
226 /* Is the host device open */
229 /* We should allow only one process to open a port */
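
Taken together, the per-port lines above imply a struct port layout roughly like the sketch below. It is condensed from the field names visible in this listing rather than copied from drivers/char/virtio_console.c, and the member types struct port_buffer, struct port_stats and struct console refer to the driver's own private definitions.

struct port {
	struct list_head list;			/* entry in ports_device->ports */
	struct ports_device *portdev;		/* device-wide data (vdev->priv) */

	struct port_buffer *inbuf;		/* buffer currently being read */
	spinlock_t inbuf_lock;			/* protects inbuf; also taken from interrupt context */
	spinlock_t outvq_lock;			/* protects out_vq / outvq_full */

	struct virtqueue *in_vq, *out_vq;	/* the IO vqs for this port */

	struct dentry *debugfs_file;		/* per-port debugfs entry */
	struct port_stats stats;		/* bytes_sent / bytes_received / bytes_discarded */

	struct console cons;			/* hvc handle, winsize, vtermno; valid for console ports */

	struct cdev *cdev;			/* the char device for this port */
	struct device *dev;			/* /dev/vport<N>p<id> */
	struct kref kref;			/* reference counting for hot-unplug vs. open files */

	char *name;				/* exposed via sysfs */
	struct fasync_struct *async_queue;	/* SIGIO on host connect / disconnect */

	u32 id;					/* port id used with the host */

	bool outvq_full;
	bool host_connected;			/* is the host device open? */
	bool guest_connected;			/* only one process may open a port */
};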
233 static struct port *find_port_by_vtermno(u32 vtermno)
235 struct port *port;
241 if (cons->vtermno == vtermno) {
242 port = container_of(cons, struct port, cons);
246 port = NULL;
249 return port;
252 static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
255 struct port *port;
258 spin_lock_irqsave(&portdev->ports_lock, flags);
259 list_for_each_entry(port, &portdev->ports, list) {
260 if (port->cdev->dev == dev) {
261 kref_get(&port->kref);
265 port = NULL;
267 spin_unlock_irqrestore(&portdev->ports_lock, flags);
269 return port;
272 static struct port *find_port_by_devt(dev_t dev)
275 struct port *port;
280 port = find_port_by_devt_in_portdev(portdev, dev);
281 if (port)
284 port = NULL;
287 return port;
290 static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
292 struct port *port;
295 spin_lock_irqsave(&portdev->ports_lock, flags);
296 list_for_each_entry(port, &portdev->ports, list)
297 if (port->id == id)
299 port = NULL;
301 spin_unlock_irqrestore(&portdev->ports_lock, flags);
303 return port;
306 static struct port *find_port_by_vq(struct ports_device *portdev,
309 struct port *port;
312 spin_lock_irqsave(&portdev->ports_lock, flags);
313 list_for_each_entry(port, &portdev->ports, list)
314 if (port->in_vq == vq || port->out_vq == vq)
316 port = NULL;
318 spin_unlock_irqrestore(&portdev->ports_lock, flags);
319 return port;
322 static bool is_console_port(struct port *port)
324 if (port->cons.hvc)
331 return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
340 if (!portdev->vdev)
342 return __virtio_test_bit(portdev->vdev, VIRTIO_CONSOLE_F_MULTIPORT);
352 for (i = 0; i < buf->sgpages; i++) {
353 struct page *page = sg_page(&buf->sg[i]);
359 if (!buf->dev) {
360 kfree(buf->buf);
366 /* queue up dma-buffers to be freed later */
368 list_add_tail(&buf->list, &pending_free_dma_bufs);
372 dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
375 put_device(buf->dev);
398 list_del(&buf->list);
418 buf->sgpages = pages;
420 buf->dev = NULL;
421 buf->buf = NULL;
432 buf->dev = vdev->dev.parent;
433 if (!buf->dev)
437 get_device(buf->dev);
438 buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
441 buf->dev = NULL;
442 buf->buf = kmalloc(buf_size, GFP_KERNEL);
445 if (!buf->buf)
447 buf->len = 0;
448 buf->offset = 0;
449 buf->size = buf_size;
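
The alloc_buf() lines above show two allocation strategies: DMA-coherent memory when the transport requires it (the rproc-serial case) and a plain kmalloc() otherwise. A minimal sketch of that split, with the hypothetical names my_buf and my_alloc_buf standing in for the driver's own port_buffer handling; the scatterlist setup used for splice is left out.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/virtio.h>

struct my_buf {				/* hypothetical, mirrors part of struct port_buffer */
	void *buf;
	struct device *dev;		/* non-NULL means a DMA-coherent allocation */
	dma_addr_t dma;
	size_t size, len, offset;
};

static int my_alloc_buf(struct my_buf *b, struct virtio_device *vdev, size_t buf_size)
{
	if (is_rproc_serial(vdev)) {
		/* Allocate from the transport's parent device so the remote side can reach it */
		b->dev = get_device(vdev->dev.parent);
		b->buf = dma_alloc_coherent(b->dev, buf_size, &b->dma, GFP_KERNEL);
	} else {
		b->dev = NULL;
		b->buf = kmalloc(buf_size, GFP_KERNEL);
	}
	if (!b->buf) {
		if (b->dev)
			put_device(b->dev);
		return -ENOMEM;
	}

	b->size = buf_size;
	b->len = 0;
	b->offset = 0;
	return 0;
}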
459 static struct port_buffer *get_inbuf(struct port *port)
464 if (port->inbuf)
465 return port->inbuf;
467 buf = virtqueue_get_buf(port->in_vq, &len);
469 buf->len = min_t(size_t, len, buf->size);
470 buf->offset = 0;
471 port->stats.bytes_received += len;
477 * Create a scatter-gather list representing our input buffer and put
487 sg_init_one(sg, buf->buf, buf->size);
492 ret = vq->num_free;
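
The add_inbuf() fragment above is the standard virtio receive pattern: wrap the buffer in a one-entry scatterlist, post it with virtqueue_add_inbuf(), and kick the host. A hedged reconstruction of the whole helper, consistent with the matched lines:

static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
{
	struct scatterlist sg[1];
	int ret;

	/* One scatterlist entry covering the whole receive buffer */
	sg_init_one(sg, buf->buf, buf->size);

	/* 'buf' is the token handed back later by virtqueue_get_buf() */
	ret = virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC);
	virtqueue_kick(vq);
	if (!ret)
		ret = vq->num_free;
	return ret;
}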
496 /* Discard any unread data this port has. Callers must hold the port's inbuf_lock. */
497 static void discard_port_data(struct port *port)
502 if (!port->portdev) {
506 buf = get_inbuf(port);
510 port->stats.bytes_discarded += buf->len - buf->offset;
511 if (add_inbuf(port->in_vq, buf) < 0) {
515 port->inbuf = NULL;
516 buf = get_inbuf(port);
519 dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
523 static bool port_has_data(struct port *port)
529 spin_lock_irqsave(&port->inbuf_lock, flags);
530 port->inbuf = get_inbuf(port);
531 if (port->inbuf)
534 spin_unlock_irqrestore(&port->inbuf_lock, flags);
548 vq = portdev->c_ovq;
550 spin_lock(&portdev->c_ovq_lock);
552 portdev->cpkt.id = cpu_to_virtio32(portdev->vdev, port_id);
553 portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
554 portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);
556 sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));
558 if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
565 spin_unlock(&portdev->c_ovq_lock);
569 static ssize_t send_control_msg(struct port *port, unsigned int event,
572 /* Did the port get unplugged before userspace closed it? */
573 if (port->portdev)
574 return __send_control_msg(port->portdev, port->id, event, value);
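
Control traffic to the host uses the same add-and-kick pattern, but over the dedicated control-out virtqueue and with the struct virtio_console_control layout from <linux/virtio_console.h>. A sketch consistent with the __send_control_msg() lines above; the wrapper name send_event is hypothetical and the multiport feature check is omitted.

static void send_event(struct ports_device *portdev, u32 port_id,
		       u16 event, u16 value)
{
	struct scatterlist sg[1];
	struct virtqueue *vq = portdev->c_ovq;
	unsigned int len;

	spin_lock(&portdev->c_ovq_lock);

	/* Control packets are in the device's endianness on the wire */
	portdev->cpkt.id    = cpu_to_virtio32(portdev->vdev, port_id);
	portdev->cpkt.event = cpu_to_virtio16(portdev->vdev, event);
	portdev->cpkt.value = cpu_to_virtio16(portdev->vdev, value);

	sg_init_one(sg, &portdev->cpkt, sizeof(struct virtio_console_control));

	if (virtqueue_add_outbuf(vq, sg, 1, &portdev->cpkt, GFP_ATOMIC) == 0) {
		virtqueue_kick(vq);
		/* Busy-wait until the host has consumed the packet */
		while (!virtqueue_get_buf(vq, &len) && !virtqueue_is_broken(vq))
			cpu_relax();
	}

	spin_unlock(&portdev->c_ovq_lock);
}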
579 /* Callers must take the port->outvq_lock */
580 static void reclaim_consumed_buffers(struct port *port)
585 if (!port->portdev) {
589 while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
591 port->outvq_full = false;
595 static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
604 out_vq = port->out_vq;
606 spin_lock_irqsave(&port->outvq_lock, flags);
608 reclaim_consumed_buffers(port);
612 /* Tell Host to go! */
620 if (out_vq->num_free == 0)
621 port->outvq_full = true;
627 * Wait till the host acknowledges it pushed out the data we
639 spin_unlock_irqrestore(&port->outvq_lock, flags);
641 port->stats.bytes_sent += in_count;
643 * We're expected to return the amount of data we wrote -- all
653 static ssize_t fill_readbuf(struct port *port, u8 __user *out_buf,
659 if (!out_count || !port_has_data(port))
662 buf = port->inbuf;
663 out_count = min(out_count, buf->len - buf->offset);
668 ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
670 return -EFAULT;
672 memcpy((__force u8 *)out_buf, buf->buf + buf->offset,
676 buf->offset += out_count;
678 if (buf->offset == buf->len) {
681 * Re-queue so that the Host can send us more data.
683 spin_lock_irqsave(&port->inbuf_lock, flags);
684 port->inbuf = NULL;
686 if (add_inbuf(port->in_vq, buf) < 0)
687 dev_warn(port->dev, "failed add_buf\n");
689 spin_unlock_irqrestore(&port->inbuf_lock, flags);
696 static bool will_read_block(struct port *port)
698 if (!port->guest_connected) {
699 /* Port got hot-unplugged. Let's exit. */
702 return !port_has_data(port) && port->host_connected;
705 static bool will_write_block(struct port *port)
709 if (!port->guest_connected) {
710 /* Port got hot-unplugged. Let's exit. */
713 if (!port->host_connected)
716 spin_lock_irq(&port->outvq_lock);
718 * Check if the Host has consumed any buffers since we last
721 reclaim_consumed_buffers(port);
722 ret = port->outvq_full;
723 spin_unlock_irq(&port->outvq_lock);
731 struct port *port;
734 port = filp->private_data;
736 /* Port is hot-unplugged. */
737 if (!port->guest_connected)
738 return -ENODEV;
740 if (!port_has_data(port)) {
742 * If nothing's connected on the host just return 0 in
746 if (!port->host_connected)
748 if (filp->f_flags & O_NONBLOCK)
749 return -EAGAIN;
751 ret = wait_event_freezable(port->waitqueue,
752 !will_read_block(port));
756 /* Port got hot-unplugged while we were waiting above. */
757 if (!port->guest_connected)
758 return -ENODEV;
764 * might receive some data as well as the host could get
769 if (!port_has_data(port) && !port->host_connected)
772 return fill_readbuf(port, ubuf, count, true);
775 static int wait_port_writable(struct port *port, bool nonblock)
779 if (will_write_block(port)) {
781 return -EAGAIN;
783 ret = wait_event_freezable(port->waitqueue,
784 !will_write_block(port));
788 /* Port got hot-unplugged. */
789 if (!port->guest_connected)
790 return -ENODEV;
798 struct port *port;
808 port = filp->private_data;
810 nonblock = filp->f_flags & O_NONBLOCK;
812 ret = wait_port_writable(port, nonblock);
818 buf = alloc_buf(port->portdev->vdev, count, 0);
820 return -ENOMEM;
822 ret = copy_from_user(buf->buf, ubuf, count);
824 ret = -EFAULT;
829 * We now ask send_buf() to not spin for generic ports -- we
830 * can re-use the same code path that non-blocking file
833 * through to the host.
836 sg_init_one(sg, buf->buf, count);
837 ret = __send_to_port(port, sg, 1, count, buf, nonblock);
858 struct sg_list *sgl = sd->u.data;
861 if (sgl->n == sgl->size)
867 get_page(buf->page);
868 unlock_page(buf->page);
870 len = min(buf->len, sd->len);
871 sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset);
878 return -ENOMEM;
880 offset = sd->pos & ~PAGE_MASK;
882 len = sd->len;
884 len = PAGE_SIZE - offset;
886 src = kmap_local_page(buf->page);
887 memcpy(page_address(page) + offset, src + buf->offset, len);
890 sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
892 sgl->n++;
893 sgl->len += len;
898 /* Faster zero-copy write by splicing */
903 struct port *port = filp->private_data;
917 * pipe_to_sg() must allocate dma-buffers and copy content from
919 * support allocating and freeing such a list of dma-buffers.
921 if (is_rproc_serial(port->out_vq->vdev))
922 return -EINVAL;
926 if (pipe_empty(pipe->head, pipe->tail))
929 ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
933 occupancy = pipe_occupancy(pipe->head, pipe->tail);
934 buf = alloc_buf(port->portdev->vdev, 0, occupancy);
937 ret = -ENOMEM;
944 sgl.sg = buf->sg;
949 ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
962 struct port *port;
965 port = filp->private_data;
966 poll_wait(filp, &port->waitqueue, wait);
968 if (!port->guest_connected) {
969 /* Port got unplugged */
973 if (!will_read_block(port))
975 if (!will_write_block(port))
977 if (!port->host_connected)
987 struct port *port;
989 port = filp->private_data;
991 /* Notify host of port being closed */
992 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
994 spin_lock_irq(&port->inbuf_lock);
995 port->guest_connected = false;
997 discard_port_data(port);
999 spin_unlock_irq(&port->inbuf_lock);
1001 spin_lock_irq(&port->outvq_lock);
1002 reclaim_consumed_buffers(port);
1003 spin_unlock_irq(&port->outvq_lock);
1007 * Locks aren't necessary here as a port can't be opened after
1008 * unplug, and if a port isn't unplugged, a kref would already
1009 * exist for the port. Plus, taking ports_lock here would
1011 * inside remove_port if we're the last holder of the port,
1014 kref_put(&port->kref, remove_port);
1021 struct cdev *cdev = inode->i_cdev;
1022 struct port *port;
1025 /* We get the port with a kref here */
1026 port = find_port_by_devt(cdev->dev);
1027 if (!port) {
1028 /* Port was unplugged before we could proceed */
1029 return -ENXIO;
1031 filp->private_data = port;
1034 * Don't allow opening of console port devices -- that's done
1037 if (is_console_port(port)) {
1038 ret = -ENXIO;
1042 /* Allow only one process to open a particular port at a time */
1043 spin_lock_irq(&port->inbuf_lock);
1044 if (port->guest_connected) {
1045 spin_unlock_irq(&port->inbuf_lock);
1046 ret = -EBUSY;
1050 port->guest_connected = true;
1051 spin_unlock_irq(&port->inbuf_lock);
1053 spin_lock_irq(&port->outvq_lock);
1056 * buffers in the window of the port getting previously closed
1059 reclaim_consumed_buffers(port);
1060 spin_unlock_irq(&port->outvq_lock);
1064 /* Notify host of port being opened */
1065 send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
1069 kref_put(&port->kref, remove_port);
1075 struct port *port;
1077 port = filp->private_data;
1078 return fasync_helper(fd, filp, mode, &port->async_queue);
1085 * /dev/vport<device number>p<port number>
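
Because each generic port is exposed as a character device with the name shown above, a guest application drives it with ordinary file I/O. A minimal userspace example; the node /dev/vport0p1 is illustrative and should be replaced with the device and port you actually have.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/vport0p1", O_RDWR);	/* example node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Writes block until the host side has opened the port (unless O_NONBLOCK) */
	if (write(fd, "hello host\n", 11) < 0)
		perror("write");

	/* Wait up to 5s for data from the host, then read it */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			fwrite(buf, 1, n, stdout);
	}

	close(fd);
	return 0;
}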
1101 * We turn the characters into a scatter-gather list, add it to the
1102 * output queue and then kick the Host. Then we sit here waiting for
1108 struct port *port;
1113 port = find_port_by_vtermno(vtermno);
1114 if (!port)
1115 return -EPIPE;
1119 return -ENOMEM;
1122 ret = __send_to_port(port, sg, 1, count, data, false);
1136 struct port *port;
1138 port = find_port_by_vtermno(vtermno);
1139 if (!port)
1140 return -EPIPE;
1143 BUG_ON(!port->in_vq);
1145 return fill_readbuf(port, (__force u8 __user *)buf, count, false);
1148 static void resize_console(struct port *port)
1152 /* The port could have been hot-unplugged */
1153 if (!port || !is_console_port(port))
1156 vdev = port->portdev->vdev;
1161 hvc_resize(port->cons.hvc, port->cons.ws);
1167 struct port *port;
1169 port = find_port_by_vtermno(hp->vtermno);
1170 if (!port)
1171 return -EINVAL;
1173 hp->irq_requested = 1;
1174 resize_console(port);
1181 hp->irq_requested = 0;
1193 static int init_port_console(struct port *port)
1198 * The Host's telling us this port is a console port. Hook it
1218 port->cons.vtermno = ret;
1219 port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
1220 if (IS_ERR(port->cons.hvc)) {
1221 ret = PTR_ERR(port->cons.hvc);
1222 dev_err(port->dev,
1223 "error %d allocating hvc for port\n", ret);
1224 port->cons.hvc = NULL;
1225 ida_free(&vtermno_ida, port->cons.vtermno);
1229 list_add_tail(&port->cons.list, &pdrvdata.consoles);
1231 port->guest_connected = true;
1233 /* Notify host of port being opened */
1234 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
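
Console ports bypass the char-device path and register with the hvc framework instead. A condensed sketch of init_port_console() reconstructed from the lines above; the locking around pdrvdata.consoles and the error reporting are trimmed.

static int init_port_console(struct port *port)
{
	int ret;

	/* Reserve a unique vterm number for this console */
	ret = ida_alloc_min(&vtermno_ida, 1, GFP_KERNEL);
	if (ret < 0)
		return ret;
	port->cons.vtermno = ret;

	/* Register with the hvc core; PAGE_SIZE is the output buffer size */
	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
	if (IS_ERR(port->cons.hvc)) {
		ret = PTR_ERR(port->cons.hvc);
		port->cons.hvc = NULL;
		ida_free(&vtermno_ida, port->cons.vtermno);
		return ret;
	}

	list_add_tail(&port->cons.list, &pdrvdata.consoles);
	port->guest_connected = true;

	/* Tell the host the console port is open and ready for data */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
	return 0;
}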
1242 struct port *port;
1244 port = dev_get_drvdata(dev);
1246 return sprintf(buffer, "%s\n", port->name);
1263 struct port *port = s->private;
1265 seq_printf(s, "name: %s\n", port->name ? port->name : "");
1266 seq_printf(s, "guest_connected: %d\n", port->guest_connected);
1267 seq_printf(s, "host_connected: %d\n", port->host_connected);
1268 seq_printf(s, "outvq_full: %d\n", port->outvq_full);
1269 seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
1270 seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
1271 seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
1273 is_console_port(port) ? "yes" : "no");
1274 seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
1281 static void set_console_size(struct port *port, u16 rows, u16 cols)
1283 if (!port || !is_console_port(port))
1286 port->cons.ws.ws_row = rows;
1287 port->cons.ws.ws_col = cols;
1298 buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
1300 return -ENOMEM;
1316 static void send_sigio_to_port(struct port *port)
1318 if (port->async_queue && port->guest_connected)
1319 kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
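
Those kill_fasync() calls only reach an application that has armed SIGIO delivery on its open port descriptor. A minimal userspace example of doing that, again against the illustrative /dev/vport0p1:

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void on_sigio(int sig)
{
	(void)sig;
	got_sigio = 1;		/* host connect/disconnect or new data on the port */
}

int main(void)
{
	int fd = open("/dev/vport0p1", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	signal(SIGIO, on_sigio);
	fcntl(fd, F_SETOWN, getpid());				/* route SIGIO to this process */
	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);	/* enable async notification */

	pause();						/* sleep until a signal arrives */
	if (got_sigio)
		printf("port state changed\n");

	close(fd);
	return 0;
}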
1325 struct port *port;
1329 port = kmalloc(sizeof(*port), GFP_KERNEL);
1330 if (!port) {
1331 err = -ENOMEM;
1334 kref_init(&port->kref);
1336 port->portdev = portdev;
1337 port->id = id;
1339 port->name = NULL;
1340 port->inbuf = NULL;
1341 port->cons.hvc = NULL;
1342 port->async_queue = NULL;
1344 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1345 port->cons.vtermno = 0;
1347 port->host_connected = port->guest_connected = false;
1348 port->stats = (struct port_stats) { 0 };
1350 port->outvq_full = false;
1352 port->in_vq = portdev->in_vqs[port->id];
1353 port->out_vq = portdev->out_vqs[port->id];
1355 port->cdev = cdev_alloc();
1356 if (!port->cdev) {
1357 dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
1358 err = -ENOMEM;
1361 port->cdev->ops = &port_fops;
1363 devt = MKDEV(portdev->chr_major, id);
1364 err = cdev_add(port->cdev, devt, 1);
1366 dev_err(&port->portdev->vdev->dev,
1367 "Error %d adding cdev for port %u\n", err, id);
1370 port->dev = device_create(&port_class, &port->portdev->vdev->dev,
1371 devt, port, "vport%up%u",
1372 port->portdev->vdev->index, id);
1373 if (IS_ERR(port->dev)) {
1374 err = PTR_ERR(port->dev);
1375 dev_err(&port->portdev->vdev->dev,
1376 "Error %d creating device for port %u\n",
1381 spin_lock_init(&port->inbuf_lock);
1382 spin_lock_init(&port->outvq_lock);
1383 init_waitqueue_head(&port->waitqueue);
1389 err = fill_queue(port->in_vq, &port->inbuf_lock);
1390 if (err < 0 && err != -ENOSPC) {
1391 dev_err(port->dev, "Error allocating inbufs\n");
1395 if (is_rproc_serial(port->portdev->vdev))
1398 * rproc_serial does not want the console port, only
1399 * the generic port implementation.
1401 port->host_connected = true;
1402 else if (!use_multiport(port->portdev)) {
1405 * this has to be a console port.
1407 err = init_port_console(port);
1412 spin_lock_irq(&portdev->ports_lock);
1413 list_add_tail(&port->list, &port->portdev->ports);
1414 spin_unlock_irq(&portdev->ports_lock);
1417 * Tell the Host we're set so that it can send us various
1418 * configuration parameters for this port (e.g., port name,
1419 * caching, whether this is a console port, etc.)
1421 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1425 * inspect a port's state at any time
1428 port->portdev->vdev->index, id);
1429 port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
1431 port, &port_debugfs_fops);
1436 device_destroy(&port_class, port->dev->devt);
1438 cdev_del(port->cdev);
1440 kfree(port);
1442 /* The host might want to notify management sw about port add failure */
1447 /* No users remain, remove all port-specific data. */
1450 struct port *port;
1452 port = container_of(kref, struct port, kref);
1454 kfree(port);
1457 static void remove_port_data(struct port *port)
1459 spin_lock_irq(&port->inbuf_lock);
1460 /* Remove unused data this port might have received. */
1461 discard_port_data(port);
1462 spin_unlock_irq(&port->inbuf_lock);
1464 spin_lock_irq(&port->outvq_lock);
1465 reclaim_consumed_buffers(port);
1466 spin_unlock_irq(&port->outvq_lock);
1470 * Port got unplugged. Remove port from portdev's list and drop the
1471 * kref reference. If no userspace has this port opened, it will
1472 * result in immediate removal of the port.
1474 static void unplug_port(struct port *port)
1476 spin_lock_irq(&port->portdev->ports_lock);
1477 list_del(&port->list);
1478 spin_unlock_irq(&port->portdev->ports_lock);
1480 spin_lock_irq(&port->inbuf_lock);
1481 if (port->guest_connected) {
1482 /* Let the app know the port is going down. */
1483 send_sigio_to_port(port);
1486 port->guest_connected = false;
1487 port->host_connected = false;
1489 wake_up_interruptible(&port->waitqueue);
1491 spin_unlock_irq(&port->inbuf_lock);
1493 if (is_console_port(port)) {
1495 list_del(&port->cons.list);
1497 hvc_remove(port->cons.hvc);
1498 ida_free(&vtermno_ida, port->cons.vtermno);
1501 remove_port_data(port);
1504 * We should just assume the device itself has gone off --
1505 * else a close on an open port later will try to send out a
1508 port->portdev = NULL;
1510 sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
1511 device_destroy(&port_class, port->dev->devt);
1512 cdev_del(port->cdev);
1514 debugfs_remove(port->debugfs_file);
1515 kfree(port->name);
1518 * Locks around here are not necessary - a port can't be
1519 * opened after we removed the port struct from ports_list
1522 kref_put(&port->kref, remove_port);
1525 /* Any private messages that the Host and Guest want to share */
1531 struct port *port;
1535 cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
1537 port = find_port_by_id(portdev, virtio32_to_cpu(vdev, cpkt->id));
1538 if (!port &&
1539 cpkt->event != cpu_to_virtio16(vdev, VIRTIO_CONSOLE_PORT_ADD)) {
1541 dev_dbg(&portdev->vdev->dev,
1542 "Invalid index %u in control packet\n", cpkt->id);
1546 switch (virtio16_to_cpu(vdev, cpkt->event)) {
1548 if (port) {
1549 dev_dbg(&portdev->vdev->dev,
1550 "Port %u already added\n", port->id);
1551 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
1554 if (virtio32_to_cpu(vdev, cpkt->id) >=
1555 portdev->max_nr_ports) {
1556 dev_warn(&portdev->vdev->dev,
1557 "Request for adding port with "
1558 "out-of-bound id %u, max. supported id: %u\n",
1559 cpkt->id, portdev->max_nr_ports - 1);
1562 add_port(portdev, virtio32_to_cpu(vdev, cpkt->id));
1565 unplug_port(port);
1568 if (!cpkt->value)
1570 if (is_console_port(port))
1573 init_port_console(port);
1576 * Could remove the port here in case init fails - but
1577 * have to notify the host first.
1586 if (!is_console_port(port))
1589 memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
1591 set_console_size(port, size.rows, size.cols);
1593 port->cons.hvc->irq_requested = 1;
1594 resize_console(port);
1598 port->host_connected = virtio16_to_cpu(vdev, cpkt->value);
1599 wake_up_interruptible(&port->waitqueue);
1601 * If the host port got closed and the host had any
1605 spin_lock_irq(&port->outvq_lock);
1606 reclaim_consumed_buffers(port);
1607 spin_unlock_irq(&port->outvq_lock);
1611 * knowing the host connection state changed.
1613 spin_lock_irq(&port->inbuf_lock);
1614 send_sigio_to_port(port);
1615 spin_unlock_irq(&port->inbuf_lock);
1622 if (port->name)
1629 name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
1631 port->name = kmalloc(name_size, GFP_KERNEL);
1632 if (!port->name) {
1633 dev_err(port->dev,
1634 "Not enough space to store port name\n");
1637 strscpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
1642 * create it only if we have a name for the port.
1644 err = sysfs_create_group(&port->dev->kobj,
1647 dev_err(port->dev,
1656 kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
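
All of the control events handled above travel in the fixed packet layout from include/uapi/linux/virtio_console.h. The following is a from-memory summary of that header for orientation; consult the header itself for the authoritative definition.

struct virtio_console_control {
	__virtio32 id;		/* port number the event applies to */
	__virtio16 event;	/* one of the VIRTIO_CONSOLE_* events below */
	__virtio16 value;	/* event-specific payload (e.g. open = 1 / closed = 0) */
};

#define VIRTIO_CONSOLE_DEVICE_READY	0
#define VIRTIO_CONSOLE_PORT_ADD		1
#define VIRTIO_CONSOLE_PORT_REMOVE	2
#define VIRTIO_CONSOLE_PORT_READY	3
#define VIRTIO_CONSOLE_CONSOLE_PORT	4
#define VIRTIO_CONSOLE_RESIZE		5
#define VIRTIO_CONSOLE_PORT_OPEN	6
#define VIRTIO_CONSOLE_PORT_NAME	7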
1670 vq = portdev->c_ivq;
1672 spin_lock(&portdev->c_ivq_lock);
1674 spin_unlock(&portdev->c_ivq_lock);
1676 buf->len = min_t(size_t, len, buf->size);
1677 buf->offset = 0;
1679 handle_control_message(vq->vdev, portdev, buf);
1681 spin_lock(&portdev->c_ivq_lock);
1682 if (add_inbuf(portdev->c_ivq, buf) < 0) {
1683 dev_warn(&portdev->vdev->dev,
1688 spin_unlock(&portdev->c_ivq_lock);
1702 struct port *port;
1704 port = find_port_by_vq(vq->vdev->priv, vq);
1705 if (!port) {
1710 wake_up_interruptible(&port->waitqueue);
1715 struct port *port;
1718 port = find_port_by_vq(vq->vdev->priv, vq);
1719 if (!port) {
1724 spin_lock_irqsave(&port->inbuf_lock, flags);
1725 port->inbuf = get_inbuf(port);
1728 * Normally the port should not accept data when the port is
1729 * closed. For generic serial ports, the host won't (shouldn't)
1731 * can be reached when a console port is not yet connected (no
1736 * A generic serial port will discard data if not connected,
1737 * while console ports and rproc-serial ports accept data at
1738 * any time. rproc-serial is initiated with guest_connected to
1744 if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
1745 discard_port_data(port);
1748 send_sigio_to_port(port);
1750 spin_unlock_irqrestore(&port->inbuf_lock, flags);
1752 wake_up_interruptible(&port->waitqueue);
1754 if (is_console_port(port) && hvc_poll(port->cons.hvc))
1762 portdev = vq->vdev->priv;
1763 schedule_work(&portdev->control_work);
1770 portdev = vdev->priv;
1773 schedule_work(&portdev->config_work);
1783 struct port *port;
1786 vdev = portdev->vdev;
1790 port = find_port_by_id(portdev, 0);
1791 set_console_size(port, rows, cols);
1798 * done per-port.
1800 resize_console(port);
1811 nr_ports = portdev->max_nr_ports;
1816 portdev->in_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
1818 portdev->out_vqs = kmalloc_array(nr_ports, sizeof(struct virtqueue *),
1820 if (!vqs || !vqs_info || !portdev->in_vqs || !portdev->out_vqs) {
1821 err = -ENOMEM;
1826 * For backward compat (newer host but older guest), the host
1827 * spawns a console port first and also inits the vqs for port
1839 vqs_info[j].name = "control-i";
1840 vqs_info[j + 1].name = "control-o";
1851 err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, vqs_info, NULL);
1856 portdev->in_vqs[0] = vqs[0];
1857 portdev->out_vqs[0] = vqs[1];
1860 portdev->c_ivq = vqs[j];
1861 portdev->c_ovq = vqs[j + 1];
1865 portdev->in_vqs[i] = vqs[j];
1866 portdev->out_vqs[i] = vqs[j + 1];
1875 kfree(portdev->out_vqs);
1876 kfree(portdev->in_vqs);
1891 virtio_device_for_each_vq(portdev->vdev, vq) {
1899 portdev->vdev->config->del_vqs(portdev->vdev);
1900 kfree(portdev->in_vqs);
1901 kfree(portdev->out_vqs);
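
The index juggling in init_vqs() above encodes the queue layout required for virtio-console: port 0 always owns queues 0/1, the control queues sit at 2/3 once VIRTIO_CONSOLE_F_MULTIPORT is negotiated, and every further port takes the next pair. A small sketch of that mapping with a hypothetical helper name:

/*
 * Multiport virtqueue layout (receive/transmit pairs):
 *   vqs[0], vqs[1]       port 0 in/out
 *   vqs[2], vqs[3]       control in/out (only with VIRTIO_CONSOLE_F_MULTIPORT)
 *   vqs[4], vqs[5]       port 1 in/out
 *   vqs[2i+2], vqs[2i+3] port i in/out, for i >= 1
 */
static unsigned int port_rx_vq_index(u32 port_id, bool multiport)	/* hypothetical */
{
	if (port_id == 0 || !multiport)
		return 0;		/* non-multiport devices only have port 0 */
	return 2 * port_id + 2;		/* skip over the control queue pair */
}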
1907 struct port *port, *port2;
1909 portdev = vdev->priv;
1912 list_del(&portdev->list);
1918 flush_work(&portdev->control_work);
1920 flush_work(&portdev->config_work);
1926 cancel_work_sync(&portdev->control_work);
1928 cancel_work_sync(&portdev->config_work);
1930 list_for_each_entry_safe(port, port2, &portdev->ports, list)
1931 unplug_port(port);
1933 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1937 * (device-side) queues. So there's no point in keeping the
1940 * have to just stop using the port, as the vqs are going
1951 * If the host also supports multiple console ports, we check the
1952 * config space to see how many ports the host has spawned. We
1953 * initialize each port found.
1962 if (!vdev->config->get &&
1965 dev_err(&vdev->dev, "%s failure: config access disabled\n",
1967 return -EINVAL;
1972 err = -ENOMEM;
1976 /* Attach this portdev to this virtio_device, and vice-versa. */
1977 portdev->vdev = vdev;
1978 vdev->priv = portdev;
1980 portdev->chr_major = register_chrdev(0, "virtio-portsdev",
1982 if (portdev->chr_major < 0) {
1983 dev_err(&vdev->dev,
1985 portdev->chr_major, vdev->index);
1986 err = portdev->chr_major;
1991 portdev->max_nr_ports = 1;
1997 &portdev->max_nr_ports) == 0) {
1998 if (portdev->max_nr_ports == 0 ||
1999 portdev->max_nr_ports > VIRTCONS_MAX_PORTS) {
2000 dev_err(&vdev->dev,
2002 portdev->max_nr_ports);
2003 err = -EINVAL;
2009 spin_lock_init(&portdev->ports_lock);
2010 INIT_LIST_HEAD(&portdev->ports);
2011 INIT_LIST_HEAD(&portdev->list);
2013 INIT_WORK(&portdev->config_work, &config_work_handler);
2014 INIT_WORK(&portdev->control_work, &control_work_handler);
2017 spin_lock_init(&portdev->c_ivq_lock);
2018 spin_lock_init(&portdev->c_ovq_lock);
2023 dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
2027 virtio_device_ready(portdev->vdev);
2030 err = fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
2032 dev_err(&vdev->dev,
2035 * The host might want to notify mgmt sw about device
2046 * For backward compatibility: Create a console port
2047 * if we're running on an older host.
2053 list_add_tail(&portdev->list, &pdrvdata.portdevs);
2062 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
2095 struct port *port;
2097 portdev = vdev->priv;
2102 virtqueue_disable_cb(portdev->c_ivq);
2103 cancel_work_sync(&portdev->control_work);
2104 cancel_work_sync(&portdev->config_work);
2110 virtqueue_disable_cb(portdev->c_ivq);
2112 list_for_each_entry(port, &portdev->ports, list) {
2113 virtqueue_disable_cb(port->in_vq);
2114 virtqueue_disable_cb(port->out_vq);
2116 * We'll ask the host later if the new invocation has
2117 * the port opened or closed.
2119 port->host_connected = false;
2120 remove_port_data(port);
2130 struct port *port;
2133 portdev = vdev->priv;
2139 virtio_device_ready(portdev->vdev);
2142 fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
2144 list_for_each_entry(port, &portdev->ports, list) {
2145 port->in_vq = portdev->in_vqs[port->id];
2146 port->out_vq = portdev->out_vqs[port->id];
2148 fill_queue(port->in_vq, &port->inbuf_lock);
2150 /* Get port open/close status on the host */
2151 send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
2154 * If a port was open at the time of suspending, we
2155 * have to let the host know that it's still open.
2157 if (port->guest_connected)
2158 send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
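
The freeze/restore lines above are reached through the driver's PM callbacks in its struct virtio_driver registration. A trimmed sketch of how that registration typically looks for this driver; the callback names follow the driver's conventions but are not all visible in the matched lines, and the feature table is omitted, so treat the details as assumptions.

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_console = {
	.driver.name	= KBUILD_MODNAME,
	.id_table	= id_table,
	.probe		= virtcons_probe,
	.remove		= virtcons_remove,
	.config_changed	= config_intr,
#ifdef CONFIG_PM_SLEEP
	.freeze		= virtcons_freeze,	/* quiesce vqs, drop per-port buffers */
	.restore	= virtcons_restore,	/* re-create vqs, re-announce open ports */
#endif
};
module_virtio_driver(virtio_console);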
2195 pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);