// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 */

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>
#include "port.h"

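/*
 * One host port that UML consoles can attach to.  An entry is created the
 * first time a console asks for a given port number and stays on the global
 * "ports" list: "pending" holds connections whose file descriptor has not
 * been handed over yet, "connections" holds ones that are ready for a
 * waiter in port_wait(), and "done" is completed whenever a connection is
 * moved onto "connections".
 */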
struct port_list {
	struct list_head list;
	atomic_t wait_count;
	int has_connection;
	struct completion done;
	int port;
	int fd;
	spinlock_t lock;
	struct list_head pending;
	struct list_head connections;
};

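/*
 * Per-console handle returned by port_data(): the port the console is
 * attached to plus the pids of the helper and telnetd processes servicing
 * the current connection, so port_remove_dev() can kill them.
 */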
struct port_dev {
	struct port_list *port;
	int helper_pid;
	int telnetd_pid;
};

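/*
 * One incoming connection.  "socket" is the socketpair over which the
 * helper passes back the connection's final file descriptor and its pid;
 * "fd" starts out as the accepted connection and is replaced by the
 * descriptor received in pipe_interrupt().
 */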
struct connection {
	struct list_head list;
	int fd;
	int helper_pid;
	int socket[2];
	int telnetd_pid;
	struct port_list *port;
};

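/*
 * IRQ handler for the socketpair of a pending connection: receive the
 * connection's file descriptor and the helper's pid, move the connection
 * from the port's pending list to its connections list and wake up any
 * port_wait() caller via the completion.
 */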
static irqreturn_t pipe_interrupt(int irq, void *data)
{
	struct connection *conn = data;
	int n_fds = 1, fd = -1;
	ssize_t ret;

	ret = os_rcv_fd_msg(conn->socket[0], &fd, n_fds, &conn->helper_pid,
			    sizeof(conn->helper_pid));
	if (ret != sizeof(conn->helper_pid)) {
		if (ret == -EAGAIN)
			return IRQ_NONE;

		printk(KERN_ERR "pipe_interrupt : os_rcv_fd_msg returned %zd\n",
		       ret);
		os_close_file(conn->fd);
	}

	list_del(&conn->list);

	conn->fd = fd;
	list_add(&conn->list, &conn->port->connections);

	complete(&conn->port->done);
	return IRQ_HANDLED;
}

#define NO_WAITER_MSG \
	"****\n" \
	"There are currently no UML consoles waiting for port connections.\n" \
	"Either disconnect from one to make it available or activate some more\n" \
	"by enabling more consoles in the UML /etc/inittab.\n" \
	"****\n"

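/*
 * Accept one incoming connection on the port's listening socket: allocate a
 * connection record, register an IRQ on the socket the helper will use to
 * pass back the final file descriptor, and queue the record on the pending
 * list.  Returns 1 if a connection was accepted (so the caller should try
 * again) and 0 when there is nothing left to accept or an error occurred.
 */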
static int port_accept(struct port_list *port)
{
	struct connection *conn;
	int fd, socket[2], pid;

	fd = port_connection(port->fd, socket, &pid);
	if (fd < 0) {
		if (fd != -EAGAIN)
			printk(KERN_ERR "port_accept : port_connection "
			       "returned %d\n", -fd);
		goto out;
	}

	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (conn == NULL) {
		printk(KERN_ERR "port_accept : failed to allocate "
		       "connection\n");
		goto out_close;
	}
	*conn = ((struct connection)
		{ .list		= LIST_HEAD_INIT(conn->list),
		  .fd		= fd,
		  .socket	= { socket[0], socket[1] },
		  .telnetd_pid	= pid,
		  .port		= port });

	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
			   IRQF_SHARED, "telnetd", conn) < 0) {
		printk(KERN_ERR "port_accept : failed to get IRQ for "
		       "telnetd\n");
		goto out_free;
	}

	if (atomic_read(&port->wait_count) == 0) {
		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
		printk(KERN_ERR "No one waiting for port\n");
	}
	list_add(&conn->list, &port->pending);
	return 1;

out_free:
	kfree(conn);
out_close:
	os_close_file(fd);
	os_kill_process(pid, 1);
out:
	return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);

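/*
 * Workqueue function: scan every port flagged by port_interrupt() and drain
 * its listening socket by calling port_accept() until nothing is left to
 * accept.
 */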
static void port_work_proc(struct work_struct *unused)
{
	struct port_list *port;
	struct list_head *ele;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (!port->has_connection)
			continue;

		while (port_accept(port))
			;
		port->has_connection = 0;
	}
	local_irq_restore(flags);
}

static DECLARE_WORK(port_work, port_work_proc);

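/*
 * IRQ handler for the listening socket: mark the port as having a
 * connection waiting and defer the actual accept to the workqueue.
 */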
static irqreturn_t port_interrupt(int irq, void *data)
{
	struct port_list *port = data;

	port->has_connection = 1;
	schedule_work(&port_work);
	return IRQ_HANDLED;
}

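/*
 * Look up the port_list entry for port_num, creating it (binding the
 * listening socket and registering its IRQ) if this is the first console to
 * use that port, and return a freshly allocated port_dev for the console,
 * or NULL on failure.
 */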
void *port_data(int port_num)
{
	struct list_head *ele;
	struct port_list *port;
	struct port_dev *dev = NULL;
	int fd;

	mutex_lock(&ports_mutex);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (port->port == port_num)
			goto found;
	}
	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
	if (port == NULL) {
		printk(KERN_ERR "Allocation of port list failed\n");
		goto out;
	}

	fd = port_listen_fd(port_num);
	if (fd < 0) {
		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
		       port_num, -fd);
		goto out_free;
	}

	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
			   IRQF_SHARED, "port", port) < 0) {
		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
		goto out_close;
	}

	*port = ((struct port_list)
		{ .list		= LIST_HEAD_INIT(port->list),
		  .wait_count	= ATOMIC_INIT(0),
		  .has_connection = 0,
		  .port		= port_num,
		  .fd		= fd,
		  .pending	= LIST_HEAD_INIT(port->pending),
		  .connections	= LIST_HEAD_INIT(port->connections) });
	spin_lock_init(&port->lock);
	init_completion(&port->done);
	list_add(&port->list, &ports);

found:
	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "Allocation of port device entry failed\n");
		goto out;
	}

	*dev = ((struct port_dev) { .port	= port,
				    .helper_pid	= -1,
				    .telnetd_pid = -1 });
	goto out;

out_close:
	os_close_file(fd);
out_free:
	kfree(port);
out:
	mutex_unlock(&ports_mutex);
	return dev;
}

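/*
 * Block until a connection arrives on the device's port.  Failed handovers
 * (conn->fd < 0) are discarded and the wait resumes; on success the helper
 * and telnetd pids are saved in the device and the connection's file
 * descriptor is returned.  Returns -ERESTARTSYS if the wait is interrupted.
 */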
int port_wait(void *data)
{
	struct port_dev *dev = data;
	struct connection *conn;
	struct port_list *port = dev->port;
	int fd;

	atomic_inc(&port->wait_count);
	while (1) {
		fd = -ERESTARTSYS;
		if (wait_for_completion_interruptible(&port->done))
			goto out;

		spin_lock(&port->lock);

		conn = list_entry(port->connections.next, struct connection,
				  list);
		list_del(&conn->list);
		spin_unlock(&port->lock);

		os_shutdown_socket(conn->socket[0], 1, 1);
		os_close_file(conn->socket[0]);
		os_shutdown_socket(conn->socket[1], 1, 1);
		os_close_file(conn->socket[1]);

		/*
		 * This is done here because freeing an IRQ can't be done
		 * within the IRQ handler.  So, pipe_interrupt always
		 * signals the completion regardless of whether it got a
		 * successful connection.  Then we loop here throwing out
		 * failed connections until a good one is found.
		 */
		um_free_irq(TELNETD_IRQ, conn);

		if (conn->fd >= 0)
			break;
		os_close_file(conn->fd);
		kfree(conn);
	}

	fd = conn->fd;
	dev->helper_pid = conn->helper_pid;
	dev->telnetd_pid = conn->telnetd_pid;
	kfree(conn);
out:
	atomic_dec(&port->wait_count);
	return fd;
}

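/*
 * Tear down the console's current connection: kill the helper and telnetd
 * processes, if any, and forget their pids.
 */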
void port_remove_dev(void *d)
{
	struct port_dev *dev = d;

	if (dev->helper_pid != -1)
		os_kill_process(dev->helper_pid, 0);
	if (dev->telnetd_pid != -1)
		os_kill_process(dev->telnetd_pid, 1);
	dev->helper_pid = -1;
	dev->telnetd_pid = -1;
}

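/*
 * Release a port_dev allocated by port_data(), killing any processes still
 * attached to it first.
 */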
void port_kern_free(void *d)
{
	struct port_dev *dev = d;

	port_remove_dev(dev);
	kfree(dev);
}

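/*
 * Exit hook: free the IRQ and close the listening socket of every port that
 * was opened.
 */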
static void free_port(void)
{
	struct list_head *ele;
	struct port_list *port;

	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		free_irq_by_fd(port->fd);
		os_close_file(port->fd);
	}
}

__uml_exitcall(free_port);