xref: /linux/arch/um/drivers/port_kern.c (revision 811f35ff59b6f99ae272d6f5b96bc9e974f88196)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 */

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>
#include "port.h"

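/*
 * One port_list exists for each host port that the UML "port" console
 * channel listens on.  It owns the listening descriptor, counts the
 * consoles currently blocked in port_wait(), and keeps two lists of
 * connections: "pending" ones whose final descriptor has not yet been
 * received, and established "connections" waiting to be claimed by a
 * waiter.  "done" is completed once for every connection moved onto the
 * connections list.
 */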
struct port_list {
	struct list_head list;
	atomic_t wait_count;
	int has_connection;
	struct completion done;
	int port;
	int fd;
	spinlock_t lock;
	struct list_head pending;
	struct list_head connections;
};

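/*
 * Per-console handle returned by port_data().  helper_pid and telnetd_pid
 * record the host processes serving the console's current connection so
 * they can be killed when the connection goes away.
 */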
struct port_dev {
	struct port_list *port;
	int helper_pid;
	int telnetd_pid;
};

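/*
 * One incoming connection.  "socket" is the socketpair over which the
 * connection's final descriptor is passed back to the kernel side; "fd"
 * starts as the accepted descriptor and is replaced by the received one
 * in pipe_interrupt().  telnetd_pid comes from port_connection() and
 * helper_pid is filled in by os_rcv_fd().
 */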
struct connection {
	struct list_head list;
	int fd;
	int helper_pid;
	int socket[2];
	int telnetd_pid;
	struct port_list *port;
};

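/*
 * Interrupt handler for a pending connection's socketpair.  Receives the
 * connection's final descriptor (os_rcv_fd() also fills in
 * conn->helper_pid), moves the connection from the pending list to the
 * connections list and wakes a waiter.  On errors other than -EAGAIN the
 * connection is still queued, with a negative fd, because the IRQ can only
 * be freed later, in port_wait().
 */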
static irqreturn_t pipe_interrupt(int irq, void *data)
{
	struct connection *conn = data;
	int fd;

	fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
	if (fd < 0) {
		if (fd == -EAGAIN)
			return IRQ_NONE;

		printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
		       -fd);
		os_close_file(conn->fd);
	}

	list_del(&conn->list);

	conn->fd = fd;
	list_add(&conn->list, &conn->port->connections);

	complete(&conn->port->done);
	return IRQ_HANDLED;
}

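/* Written to a new connection when no console is blocked in port_wait(). */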
#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"

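/*
 * Accept one connection on the port's listening descriptor.
 * port_connection() hands back the accepted fd, a socketpair for receiving
 * the final descriptor and the pid of the telnetd helper; the new
 * connection is queued on the pending list until pipe_interrupt() sees the
 * descriptor arrive.  The GFP_ATOMIC allocation is needed because this
 * runs with interrupts disabled from port_work_proc().  Returns 1 if a
 * connection was accepted, 0 otherwise, so the caller can loop until
 * -EAGAIN.
 */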
static int port_accept(struct port_list *port)
{
	struct connection *conn;
	int fd, socket[2], pid;

	fd = port_connection(port->fd, socket, &pid);
	if (fd < 0) {
		if (fd != -EAGAIN)
			printk(KERN_ERR "port_accept : port_connection "
			       "returned %d\n", -fd);
		goto out;
	}

	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (conn == NULL) {
		printk(KERN_ERR "port_accept : failed to allocate "
		       "connection\n");
		goto out_close;
	}
	*conn = ((struct connection)
		{ .list 	= LIST_HEAD_INIT(conn->list),
		  .fd 		= fd,
		  .socket  	= { socket[0], socket[1] },
		  .telnetd_pid 	= pid,
		  .port 	= port });

	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
			  IRQF_SHARED, "telnetd", conn) < 0) {
		printk(KERN_ERR "port_accept : failed to get IRQ for "
		       "telnetd\n");
		goto out_free;
	}

	if (atomic_read(&port->wait_count) == 0) {
		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
		printk(KERN_ERR "No one waiting for port\n");
	}
	list_add(&conn->list, &port->pending);
	return 1;

 out_free:
	kfree(conn);
 out_close:
	os_close_file(fd);
	os_kill_process(pid, 1);
 out:
	return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);

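/*
 * Deferred work scheduled by port_interrupt().  Runs outside hard
 * interrupt context, but with interrupts disabled, draining every
 * connection that has arrived on any port since the last run.
 */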
static void port_work_proc(struct work_struct *unused)
{
	struct port_list *port;
	struct list_head *ele;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (!port->has_connection)
			continue;

		while (port_accept(port))
			;
		port->has_connection = 0;
	}
	local_irq_restore(flags);
}

DECLARE_WORK(port_work, port_work_proc);

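/*
 * Interrupt handler for the listening descriptor: flag the port and defer
 * the actual accept to the workqueue.
 */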
static irqreturn_t port_interrupt(int irq, void *data)
{
	struct port_list *port = data;

	port->has_connection = 1;
	schedule_work(&port_work);
	return IRQ_HANDLED;
}

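/*
 * Called when a "port" console channel is set up.  Looks up or creates the
 * port_list for the requested host port, installing the listening socket
 * and its interrupt on first use, and returns a newly allocated port_dev
 * for this console, or NULL on failure.
 */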
void *port_data(int port_num)
{
	struct list_head *ele;
	struct port_list *port;
	struct port_dev *dev = NULL;
	int fd;

	mutex_lock(&ports_mutex);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (port->port == port_num)
			goto found;
	}
	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
	if (port == NULL) {
		printk(KERN_ERR "Allocation of port list failed\n");
		goto out;
	}

	fd = port_listen_fd(port_num);
	if (fd < 0) {
		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
		       port_num, -fd);
		goto out_free;
	}

	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
			  IRQF_SHARED, "port", port) < 0) {
		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
		goto out_close;
	}

	*port = ((struct port_list)
		{ .list 	 	= LIST_HEAD_INIT(port->list),
		  .wait_count		= ATOMIC_INIT(0),
		  .has_connection 	= 0,
		  .port 	 	= port_num,
		  .fd  			= fd,
		  .pending 		= LIST_HEAD_INIT(port->pending),
		  .connections 		= LIST_HEAD_INIT(port->connections) });
	spin_lock_init(&port->lock);
	init_completion(&port->done);
	list_add(&port->list, &ports);

 found:
	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "Allocation of port device entry failed\n");
		goto out;
	}

	*dev = ((struct port_dev) { .port  		= port,
				    .helper_pid  	= -1,
				    .telnetd_pid  	= -1 });
	goto out;

 out_close:
	os_close_file(fd);
 out_free:
	kfree(port);
 out:
	mutex_unlock(&ports_mutex);
	return dev;
}

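/*
 * Block until an established connection is available on this console's
 * port.  Returns the connection's host descriptor, or -ERESTARTSYS if the
 * wait is interrupted.  Failed connections queued by pipe_interrupt() are
 * discarded here, where their IRQs can safely be freed.
 */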
int port_wait(void *data)
{
	struct port_dev *dev = data;
	struct connection *conn;
	struct port_list *port = dev->port;
	int fd;

	atomic_inc(&port->wait_count);
	while (1) {
		fd = -ERESTARTSYS;
		if (wait_for_completion_interruptible(&port->done))
			goto out;

		spin_lock(&port->lock);

		conn = list_entry(port->connections.next, struct connection,
				  list);
		list_del(&conn->list);
		spin_unlock(&port->lock);

		os_shutdown_socket(conn->socket[0], 1, 1);
		os_close_file(conn->socket[0]);
		os_shutdown_socket(conn->socket[1], 1, 1);
		os_close_file(conn->socket[1]);

		/* This is done here because freeing an IRQ can't be done
		 * within the IRQ handler.  So, pipe_interrupt always
		 * completes port->done regardless of whether it got a
		 * successful connection.  Then we loop here throwing out
		 * failed connections until a good one is found.
		 */
		um_free_irq(TELNETD_IRQ, conn);

		if (conn->fd >= 0)
			break;
		os_close_file(conn->fd);
		kfree(conn);
	}

	fd = conn->fd;
	dev->helper_pid = conn->helper_pid;
	dev->telnetd_pid = conn->telnetd_pid;
	kfree(conn);
 out:
	atomic_dec(&port->wait_count);
	return fd;
}

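/*
 * Kill the host helper and telnetd processes behind the console's current
 * connection and forget their pids.
 */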
void port_remove_dev(void *d)
{
	struct port_dev *dev = d;

	if (dev->helper_pid != -1)
		os_kill_process(dev->helper_pid, 0);
	if (dev->telnetd_pid != -1)
		os_kill_process(dev->telnetd_pid, 1);
	dev->helper_pid = -1;
	dev->telnetd_pid = -1;
}

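/* Release a console's port_dev, tearing down any attached host processes. */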
void port_kern_free(void *d)
{
	struct port_dev *dev = d;

	port_remove_dev(dev);
	kfree(dev);
}

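/*
 * UML exit hook: free each port's interrupt and close its listening
 * descriptor.  The port_list structures themselves are not freed here.
 */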
static void free_port(void)
{
	struct list_head *ele;
	struct port_list *port;

	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		free_irq_by_fd(port->fd);
		os_close_file(port->fd);
	}
}

__uml_exitcall(free_port);