xref: /linux/drivers/tty/hvc/hvc_iucv.c (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 /*
2  * hvc_iucv.c - z/VM IUCV hypervisor console (HVC) device driver
3  *
4  * This HVC device driver provides terminal access using
5  * z/VM IUCV communication paths.
6  *
7  * Copyright IBM Corp. 2008, 2009
8  *
9  * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
10  */
11 #define KMSG_COMPONENT		"hvc_iucv"
12 #define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt
13 
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <asm/ebcdic.h>
17 #include <linux/ctype.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/mempool.h>
22 #include <linux/moduleparam.h>
23 #include <linux/tty.h>
24 #include <linux/wait.h>
25 #include <net/iucv/iucv.h>
26 
27 #include "hvc_console.h"
28 
29 
30 /* General device driver settings */
31 #define HVC_IUCV_MAGIC		0xc9e4c3e5
32 #define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
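/* Minimum number of mempool elements (about a quarter page of iucv_tty_buffer structs) */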
33 #define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
34 
35 /* IUCV TTY message */
36 #define MSG_VERSION		0x02	/* Message version */
37 #define MSG_TYPE_ERROR		0x01	/* Error message */
38 #define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
39 #define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
40 #define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
41 #define MSG_TYPE_DATA		0x10	/* Terminal data */
42 
43 struct iucv_tty_msg {
44 	u8	version;		/* Message version */
45 	u8	type;			/* Message type */
46 #define MSG_MAX_DATALEN		((u16)(~0))
47 	u16	datalen;		/* Payload length */
48 	u8	data[];			/* Payload buffer */
49 } __attribute__((packed));
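/* Total IUCV message length for a payload of s bytes (iucv_tty_msg header + payload) */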
50 #define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))
51 
52 enum iucv_state_t {
53 	IUCV_DISCONN	= 0,
54 	IUCV_CONNECTED	= 1,
55 	IUCV_SEVERED	= 2,
56 };
57 
58 enum tty_state_t {
59 	TTY_CLOSED	= 0,
60 	TTY_OPENED	= 1,
61 };
62 
63 struct hvc_iucv_private {
64 	struct hvc_struct	*hvc;		/* HVC struct reference */
65 	u8			srv_name[8];	/* IUCV service name (ebcdic) */
66 	unsigned char		is_console;	/* Linux console usage flag */
67 	enum iucv_state_t	iucv_state;	/* IUCV connection status */
68 	enum tty_state_t	tty_state;	/* TTY status */
69 	struct iucv_path	*path;		/* IUCV path pointer */
70 	spinlock_t		lock;		/* hvc_iucv_private lock */
71 #define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
72 	void			*sndbuf;	/* send buffer		  */
73 	size_t			sndbuf_len;	/* length of send buffer  */
74 #define QUEUE_SNDBUF_DELAY	(HZ / 25)
75 	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
76 	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
77 	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
78 	struct list_head	tty_inqueue;	/* incoming IUCV messages */
79 	struct device		*dev;		/* device structure */
80 	u8			info_path[16];	/* IUCV path info (dev attr) */
81 };
82 
83 struct iucv_tty_buffer {
84 	struct list_head	list;	/* list pointer */
85 	struct iucv_message	msg;	/* store an IUCV message */
86 	size_t			offset;	/* data buffer offset */
87 	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
88 };
89 
90 /* IUCV callback handler */
91 static	int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
92 static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
93 static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
94 static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
95 
96 
97 /* Kernel module parameter: use one terminal device as default */
98 static unsigned long hvc_iucv_devices = 1;
99 
100 /* Array of allocated hvc iucv tty lines... */
101 static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
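/* Index of the terminal that is registered and used as the Linux console */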
102 #define IUCV_HVC_CON_IDX	(0)
103 /* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
104 #define MAX_VMID_FILTER		(500)
105 static size_t hvc_iucv_filter_size;
106 static void *hvc_iucv_filter;
107 static const char *hvc_iucv_filter_string;
108 static DEFINE_RWLOCK(hvc_iucv_filter_lock);
109 
110 /* Kmem cache and mempool for iucv_tty_buffer elements */
111 static struct kmem_cache *hvc_iucv_buffer_cache;
112 static mempool_t *hvc_iucv_mempool;
113 
114 /* IUCV handler callback functions */
115 static struct iucv_handler hvc_iucv_handler = {
116 	.path_pending  = hvc_iucv_path_pending,
117 	.path_severed  = hvc_iucv_path_severed,
118 	.message_complete = hvc_iucv_msg_complete,
119 	.message_pending  = hvc_iucv_msg_pending,
120 };
121 
122 
123 /**
124  * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
125  * @num:	The HVC virtual terminal number (vtermno)
126  *
127  * This function returns the struct hvc_iucv_private instance that corresponds
128  * to the HVC virtual terminal number specified as parameter @num.
129  */
130 static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
131 {
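	/* vtermno values are offset by HVC_IUCV_MAGIC; map @num back to a table index */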
132 	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
133 		return NULL;
134 	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
135 }
136 
137 /**
138  * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
139  * @size:	Size of the internal buffer used to store data.
140  * @flags:	Memory allocation flags passed to mempool.
141  *
142  * This function allocates a new struct iucv_tty_buffer element and, optionally,
143  * allocates an internal data buffer with the specified size @size.
144  * The internal data buffer is always allocated with GFP_DMA which is
145  * required for receiving and sending data with IUCV.
146  * Note: The total message size is the internal buffer size plus the size of
147  *	 the iucv_tty_msg header.
148  * The function returns NULL if memory allocation has failed.
149  */
150 static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
151 {
152 	struct iucv_tty_buffer *bufp;
153 
154 	bufp = mempool_alloc(hvc_iucv_mempool, flags);
155 	if (!bufp)
156 		return NULL;
157 	memset(bufp, 0, sizeof(*bufp));
158 
159 	if (size > 0) {
160 		bufp->msg.length = MSG_SIZE(size);
161 		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
162 		if (!bufp->mbuf) {
163 			mempool_free(bufp, hvc_iucv_mempool);
164 			return NULL;
165 		}
166 		bufp->mbuf->version = MSG_VERSION;
167 		bufp->mbuf->type    = MSG_TYPE_DATA;
168 		bufp->mbuf->datalen = (u16) size;
169 	}
170 	return bufp;
171 }
172 
173 /**
174  * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
175  * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
176  */
177 static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
178 {
179 	kfree(bufp->mbuf);
180 	mempool_free(bufp, hvc_iucv_mempool);
181 }
182 
183 /**
184  * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
185  * @list:	List containing struct iucv_tty_buffer elements.
186  */
187 static void destroy_tty_buffer_list(struct list_head *list)
188 {
189 	struct iucv_tty_buffer *ent, *next;
190 
191 	list_for_each_entry_safe(ent, next, list, list) {
192 		list_del(&ent->list);
193 		destroy_tty_buffer(ent);
194 	}
195 }
196 
197 /**
198  * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
199  * @priv:		Pointer to struct hvc_iucv_private
200  * @buf:		HVC buffer for writing received terminal data.
201  * @count:		HVC buffer size.
202  * @has_more_data:	Pointer to an int variable.
203  *
204  * The function picks up pending messages from the input queue and receives
205  * the message data that is then written to the specified buffer @buf.
206  * If the buffer size @count is less than the data message size, the
207  * message is kept on the input queue and @has_more_data is set to 1.
208  * If all message data has been written, the message is removed from
209  * the input queue.
210  *
211  * The function returns the number of bytes written to the terminal, or zero
212  * if there are no pending data messages available or if there is no
213  * established IUCV path.
214  * If the IUCV path has been severed, then -EPIPE is returned to cause a
215  * hang up (that is issued by the HVC layer).
216  */
217 static int hvc_iucv_write(struct hvc_iucv_private *priv,
218 			  char *buf, int count, int *has_more_data)
219 {
220 	struct iucv_tty_buffer *rb;
221 	int written;
222 	int rc;
223 
224 	/* immediately return if there is no IUCV connection */
225 	if (priv->iucv_state == IUCV_DISCONN)
226 		return 0;
227 
228 	/* if the IUCV path has been severed, return -EPIPE to inform the
229 	 * HVC layer to hang up the tty device. */
230 	if (priv->iucv_state == IUCV_SEVERED)
231 		return -EPIPE;
232 
233 	/* check if there are pending messages */
234 	if (list_empty(&priv->tty_inqueue))
235 		return 0;
236 
237 	/* receive an iucv message and flip data to the tty (ldisc) */
238 	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
239 
240 	written = 0;
241 	if (!rb->mbuf) { /* message not yet received ... */
242 		/* allocate mem to store msg data; if no memory is available
243 		 * then leave the buffer on the list and re-try later */
244 		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
245 		if (!rb->mbuf)
246 			return -ENOMEM;
247 
248 		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
249 					    rb->mbuf, rb->msg.length, NULL);
250 		switch (rc) {
251 		case 0: /* Successful	    */
252 			break;
253 		case 2:	/* No message found */
254 		case 9: /* Message purged   */
255 			break;
256 		default:
257 			written = -EIO;
258 		}
259 		/* remove buffer if an error has occurred or received data
260 		 * is not correct */
261 		if (rc || (rb->mbuf->version != MSG_VERSION) ||
262 			  (rb->msg.length    != MSG_SIZE(rb->mbuf->datalen)))
263 			goto out_remove_buffer;
264 	}
265 
266 	switch (rb->mbuf->type) {
267 	case MSG_TYPE_DATA:
268 		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
269 		memcpy(buf, rb->mbuf->data + rb->offset, written);
270 		if (written < (rb->mbuf->datalen - rb->offset)) {
271 			rb->offset += written;
272 			*has_more_data = 1;
273 			goto out_written;
274 		}
275 		break;
276 
277 	case MSG_TYPE_WINSIZE:
278 		if (rb->mbuf->datalen != sizeof(struct winsize))
279 			break;
280 		/* The caller must ensure that the hvc is locked, which
281 		 * is the case when called from hvc_iucv_get_chars() */
282 		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
283 		break;
284 
285 	case MSG_TYPE_ERROR:	/* ignored ... */
286 	case MSG_TYPE_TERMENV:	/* ignored ... */
287 	case MSG_TYPE_TERMIOS:	/* ignored ... */
288 		break;
289 	}
290 
291 out_remove_buffer:
292 	list_del(&rb->list);
293 	destroy_tty_buffer(rb);
294 	*has_more_data = !list_empty(&priv->tty_inqueue);
295 
296 out_written:
297 	return written;
298 }
299 
300 /**
301  * hvc_iucv_get_chars() - HVC get_chars operation.
302  * @vtermno:	HVC virtual terminal number.
303  * @buf:	Pointer to a buffer to store data
304  * @count:	Size of buffer available for writing
305  *
306  * The HVC thread calls this method to read characters from the back-end.
307  * If an IUCV communication path has been established, pending IUCV messages
308  * are received and data is copied into buffer @buf up to @count bytes.
309  *
310  * Locking:	The routine gets called under an irqsave() spinlock; and
311  *		the routine locks the struct hvc_iucv_private->lock to call
312  *		helper functions.
313  */
314 static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
315 {
316 	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
317 	int written;
318 	int has_more_data;
319 
320 	if (count <= 0)
321 		return 0;
322 
323 	if (!priv)
324 		return -ENODEV;
325 
326 	spin_lock(&priv->lock);
327 	has_more_data = 0;
328 	written = hvc_iucv_write(priv, buf, count, &has_more_data);
329 	spin_unlock(&priv->lock);
330 
331 	/* if there are still messages on the queue... schedule another run */
332 	if (has_more_data)
333 		hvc_kick();
334 
335 	return written;
336 }
337 
338 /**
339  * hvc_iucv_queue() - Buffer terminal data for sending.
340  * @priv:	Pointer to struct hvc_iucv_private instance.
341  * @buf:	Buffer containing data to send.
342  * @count:	Size of buffer and amount of data to send.
343  *
344  * The function queues data for sending. To actually send the buffered data,
345  * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
346  * The function returns the number of data bytes that have been buffered.
347  *
348  * If the device is not connected, data is ignored and the function returns
349  * @count.
350  * If the buffer is full, the function returns 0.
351  * If an existing IUCV communication path has been severed, -EPIPE is returned
352  * (which can be passed to the HVC layer to cause a tty hangup).
353  */
354 static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
355 			  int count)
356 {
357 	size_t len;
358 
359 	if (priv->iucv_state == IUCV_DISCONN)
360 		return count;			/* ignore data */
361 
362 	if (priv->iucv_state == IUCV_SEVERED)
363 		return -EPIPE;
364 
365 	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
366 	if (!len)
367 		return 0;
368 
369 	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
370 	priv->sndbuf_len += len;
371 
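	/* defer the actual send; data written in the meantime is added to the
	 * same send buffer and transmitted with one IUCV message */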
372 	if (priv->iucv_state == IUCV_CONNECTED)
373 		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);
374 
375 	return len;
376 }
377 
378 /**
379  * hvc_iucv_send() - Send an IUCV message containing terminal data.
380  * @priv:	Pointer to struct hvc_iucv_private instance.
381  *
382  * If an IUCV communication path has been established, the buffered output
383  * data is sent via an IUCV message and the number of bytes sent is returned.
384  * Returns 0 if there is no buffered data, -EIO if no IUCV communication
385  * path has been established, or -EPIPE if an existing path has been severed.
386  */
387 static int hvc_iucv_send(struct hvc_iucv_private *priv)
388 {
389 	struct iucv_tty_buffer *sb;
390 	int rc, len;
391 
392 	if (priv->iucv_state == IUCV_SEVERED)
393 		return -EPIPE;
394 
395 	if (priv->iucv_state == IUCV_DISCONN)
396 		return -EIO;
397 
398 	if (!priv->sndbuf_len)
399 		return 0;
400 
401 	/* allocate internal buffer to store msg data and also compute total
402 	 * message length */
403 	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
404 	if (!sb)
405 		return -ENOMEM;
406 
407 	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
408 	sb->mbuf->datalen = (u16) priv->sndbuf_len;
409 	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);
410 
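	/* queue the buffer; it is removed and freed by hvc_iucv_msg_complete()
	 * once delivery has completed (or right below if the send fails) */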
411 	list_add_tail(&sb->list, &priv->tty_outqueue);
412 
413 	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
414 				 (void *) sb->mbuf, sb->msg.length);
415 	if (rc) {
416 		/* drop the message here; however we might want to handle
417 		 * 0x03 (msg limit reached) by trying again... */
418 		list_del(&sb->list);
419 		destroy_tty_buffer(sb);
420 	}
421 	len = priv->sndbuf_len;
422 	priv->sndbuf_len = 0;
423 
424 	return len;
425 }
426 
427 /**
428  * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
429  * @work:	Work structure.
430  *
431  * This work queue function sends buffered output data over IUCV and,
432  * if not all buffered data could be sent, reschedules itself.
433  */
434 static void hvc_iucv_sndbuf_work(struct work_struct *work)
435 {
436 	struct hvc_iucv_private *priv;
437 
438 	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);
439 	if (!priv)
440 		return;
441 
442 	spin_lock_bh(&priv->lock);
443 	hvc_iucv_send(priv);
444 	spin_unlock_bh(&priv->lock);
445 }
446 
447 /**
448  * hvc_iucv_put_chars() - HVC put_chars operation.
449  * @vtermno:	HVC virtual terminal number.
450  * @buf:	Pointer to a buffer to read data from
451  * @count:	Size of buffer available for reading
452  *
453  * The HVC thread calls this method to write characters to the back-end.
454  * The function calls hvc_iucv_queue() to queue terminal data for sending.
455  *
456  * Locking:	The method gets called under an irqsave() spinlock; and
457  *		locks struct hvc_iucv_private->lock.
458  */
459 static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
460 {
461 	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
462 	int queued;
463 
464 	if (count <= 0)
465 		return 0;
466 
467 	if (!priv)
468 		return -ENODEV;
469 
470 	spin_lock(&priv->lock);
471 	queued = hvc_iucv_queue(priv, buf, count);
472 	spin_unlock(&priv->lock);
473 
474 	return queued;
475 }
476 
477 /**
478  * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
479  * @hp:	Pointer to the HVC device (struct hvc_struct)
480  * @id:	Additional data (originally passed to hvc_alloc): the index of a struct
481  *	hvc_iucv_private instance.
482  *
483  * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private
484  * instance that is derived from @id. Always returns 0.
485  *
486  * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
487  */
488 static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
489 {
490 	struct hvc_iucv_private *priv;
491 
492 	priv = hvc_iucv_get_private(id);
493 	if (!priv)
494 		return 0;
495 
496 	spin_lock_bh(&priv->lock);
497 	priv->tty_state = TTY_OPENED;
498 	spin_unlock_bh(&priv->lock);
499 
500 	return 0;
501 }
502 
503 /**
504  * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
505  * @priv:	Pointer to the struct hvc_iucv_private instance.
506  */
507 static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
508 {
509 	destroy_tty_buffer_list(&priv->tty_outqueue);
510 	destroy_tty_buffer_list(&priv->tty_inqueue);
511 
512 	priv->tty_state = TTY_CLOSED;
513 	priv->iucv_state = IUCV_DISCONN;
514 
515 	priv->sndbuf_len = 0;
516 }
517 
518 /**
519  * tty_outqueue_empty() - Test if the tty outq is empty
520  * @priv:	Pointer to struct hvc_iucv_private instance.
521  */
522 static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
523 {
524 	int rc;
525 
526 	spin_lock_bh(&priv->lock);
527 	rc = list_empty(&priv->tty_outqueue);
528 	spin_unlock_bh(&priv->lock);
529 
530 	return rc;
531 }
532 
533 /**
534  * flush_sndbuf_sync() - Flush send buffer and wait for completion
535  * @priv:	Pointer to struct hvc_iucv_private instance.
536  *
537  * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
538  * to flush any buffered terminal output data and waits for completion.
539  */
540 static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
541 {
542 	int sync_wait;
543 
544 	cancel_delayed_work_sync(&priv->sndbuf_work);
545 
546 	spin_lock_bh(&priv->lock);
547 	hvc_iucv_send(priv);		/* force sending buffered data */
548 	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
549 	spin_unlock_bh(&priv->lock);
550 
551 	if (sync_wait)
552 		wait_event_timeout(priv->sndbuf_waitq,
553 				   tty_outqueue_empty(priv), HZ/10);
554 }
555 
556 /**
557  * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
558  * @priv:	Pointer to hvc_iucv_private structure
559  *
560  * This routine severs an existing IUCV communication path and hangs
561  * up the underlying HVC terminal device.
562  * The hang-up occurs only if an IUCV communication path is established;
563  * otherwise there is no need to hang up the terminal device.
564  *
565  * The IUCV HVC hang-up is separated into two steps:
566  * 1. After the IUCV path has been severed, the iucv_state is set to
567  *    IUCV_SEVERED.
568  * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the
569  *    IUCV_SEVERED state causes the tty hang-up in the HVC layer.
570  *
571  * If the tty has not yet been opened, clean up the hvc_iucv_private
572  * structure to allow re-connects.
573  * If the tty has been opened, let get_chars() return -EPIPE to signal
574  * the HVC layer to hang up the tty and, if so, wake up the HVC thread
575  * to call get_chars()...
576  *
577  * Special notes on hanging up an HVC terminal instantiated as console:
578  * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
579  *		2. do_tty_hangup() calls tty->ops->close() for console_filp
580  *			=> no hangup notifier is called by HVC (default)
581  *		3. hvc_close() returns because of tty_hung_up_p(filp)
582  *			=> no delete notifier is called!
583  * Finally, the back-end is not notified and, thus, the tty session is
584  * kept active (TTY_OPENED) to be ready for re-connects.
585  *
586  * Locking:	spin_lock(&priv->lock) w/o disabling bh
587  */
588 static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
589 {
590 	struct iucv_path *path;
591 
592 	path = NULL;
593 	spin_lock(&priv->lock);
594 	if (priv->iucv_state == IUCV_CONNECTED) {
595 		path = priv->path;
596 		priv->path = NULL;
597 		priv->iucv_state = IUCV_SEVERED;
598 		if (priv->tty_state == TTY_CLOSED)
599 			hvc_iucv_cleanup(priv);
600 		else
601 			/* console is special (see above) */
602 			if (priv->is_console) {
603 				hvc_iucv_cleanup(priv);
604 				priv->tty_state = TTY_OPENED;
605 			} else
606 				hvc_kick();
607 	}
608 	spin_unlock(&priv->lock);
609 
610 	/* finally sever path (outside of priv->lock due to lock ordering) */
611 	if (path) {
612 		iucv_path_sever(path, NULL);
613 		iucv_path_free(path);
614 	}
615 }
616 
617 /**
618  * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
619  * @hp:		Pointer to the HVC device (struct hvc_struct)
620  * @id:		Additional data (originally passed to hvc_alloc):
621  *		the index of a struct hvc_iucv_private instance.
622  *
623  * This routine notifies the HVC back-end that a tty hangup (carrier loss,
624  * virtual or otherwise) has occurred.
625  * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
626  * to keep an existing IUCV communication path established.
627  * (Background: vhangup() is called from user space (by getty or login) to
628  *		disable writing to the tty by other applications).
629  * If the tty has been opened and an established IUCV path has been severed
630  * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
631  *
632  * Locking:	struct hvc_iucv_private->lock
633  */
634 static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
635 {
636 	struct hvc_iucv_private *priv;
637 
638 	priv = hvc_iucv_get_private(id);
639 	if (!priv)
640 		return;
641 
642 	flush_sndbuf_sync(priv);
643 
644 	spin_lock_bh(&priv->lock);
645 	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
646 	 *	 path_severed callback [IUCV_SEVERED]), we have to clean up
647 	 *	 our structure and set the state to TTY_CLOSED.
648 	 *	 If the tty was hung up otherwise (e.g. vhangup()), then we
649 	 *	 ignore this hangup and keep an established IUCV path open...
650 	 *	 (...the reason is that we are not able to connect back to the
651 	 *	 client if we disconnect on hang up) */
652 	priv->tty_state = TTY_CLOSED;
653 
654 	if (priv->iucv_state == IUCV_SEVERED)
655 		hvc_iucv_cleanup(priv);
656 	spin_unlock_bh(&priv->lock);
657 }
658 
659 /**
660  * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
661  * @hp:		Pointer to the HVC device (struct hvc_struct)
662  * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
663  *
664  * This routine notifies the HVC back-end to raise or lower DTR/RTS
665  * lines.  Raising DTR/RTS is ignored.  Lowering DTR/RTS drops the
666  * IUCV connection (similar to hanging up a modem).
667  */
668 static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
669 {
670 	struct hvc_iucv_private *priv;
671 	struct iucv_path        *path;
672 
673 	/* Raising DTR/RTS is ignored as IUCV connections can be
674 	 * established at any time.
675 	 */
676 	if (raise)
677 		return;
678 
679 	priv = hvc_iucv_get_private(hp->vtermno);
680 	if (!priv)
681 		return;
682 
683 	/* Lowering the DTR/RTS lines disconnects an established IUCV
684 	 * connection.
685 	 */
686 	flush_sndbuf_sync(priv);
687 
688 	spin_lock_bh(&priv->lock);
689 	path = priv->path;		/* save reference to IUCV path */
690 	priv->path = NULL;
691 	priv->iucv_state = IUCV_DISCONN;
692 	spin_unlock_bh(&priv->lock);
693 
694 	/* Sever IUCV path outside of priv->lock due to lock ordering of:
695 	 * priv->lock <--> iucv_table_lock */
696 	if (path) {
697 		iucv_path_sever(path, NULL);
698 		iucv_path_free(path);
699 	}
700 }
701 
702 /**
703  * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
704  * @hp:		Pointer to the HVC device (struct hvc_struct)
705  * @id:		Additional data (originally passed to hvc_alloc):
706  *		the index of a struct hvc_iucv_private instance.
707  *
708  * This routine notifies the HVC back-end that the last tty device fd has been
709  * closed.  The function cleans up tty resources.  The clean-up of the IUCV
710  * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
711  * control setting.
712  *
713  * Locking:	struct hvc_iucv_private->lock
714  */
715 static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
716 {
717 	struct hvc_iucv_private *priv;
718 
719 	priv = hvc_iucv_get_private(id);
720 	if (!priv)
721 		return;
722 
723 	flush_sndbuf_sync(priv);
724 
725 	spin_lock_bh(&priv->lock);
726 	destroy_tty_buffer_list(&priv->tty_outqueue);
727 	destroy_tty_buffer_list(&priv->tty_inqueue);
728 	priv->tty_state = TTY_CLOSED;
729 	priv->sndbuf_len = 0;
730 	spin_unlock_bh(&priv->lock);
731 }
732 
733 /**
734  * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
735  * @ipvmid:	Originating z/VM user ID (right padded with blanks)
736  *
737  * Returns 0 if the z/VM user ID @ipvmid is allowed to connect, otherwise
738  * non-zero.
739  */
740 static int hvc_iucv_filter_connreq(u8 ipvmid[8])
741 {
742 	size_t i;
743 
744 	/* Note: default policy is ACCEPT if no filter is set */
745 	if (!hvc_iucv_filter_size)
746 		return 0;
747 
748 	for (i = 0; i < hvc_iucv_filter_size; i++)
749 		if (0 == memcmp(ipvmid, hvc_iucv_filter + (8 * i), 8))
750 			return 0;
751 	return 1;
752 }
753 
754 /**
755  * hvc_iucv_path_pending() - IUCV handler to process a connection request.
756  * @path:	Pending path (struct iucv_path)
757  * @ipvmid:	z/VM system identifier of originator
758  * @ipuser:	User specified data for this path
759  *		(AF_IUCV: port/service name and originator port)
760  *
761  * The function uses the @ipuser data to determine if the pending path belongs
762  * to a terminal managed by this device driver.
763  * If the path belongs to this driver, ensure that the terminal is not accessed
764  * multiple times (only one connection to a terminal is allowed).
765  * If the terminal is not yet connected, the pending path is accepted and is
766  * associated with the appropriate struct hvc_iucv_private instance.
767  *
768  * Returns 0 if @path belongs to a terminal managed by this device driver;
769  * otherwise returns -ENODEV in order to dispatch this path to other handlers.
770  *
771  * Locking:	struct hvc_iucv_private->lock
772  */
773 static	int hvc_iucv_path_pending(struct iucv_path *path,
774 				  u8 ipvmid[8], u8 ipuser[16])
775 {
776 	struct hvc_iucv_private *priv, *tmp;
777 	u8 wildcard[9] = "lnxhvc  ";
778 	int i, rc, find_unused;
779 	u8 nuser_data[16];
780 	u8 vm_user_id[9];
781 
782 	ASCEBC(wildcard, sizeof(wildcard));
783 	find_unused = !memcmp(wildcard, ipuser, 8);
784 
785 	/* First, check if the pending path request is managed by this
786 	 * IUCV handler:
787 	 * - find a disconnected device if ipuser contains the wildcard
788 	 * - find the device that matches the terminal ID in ipuser
789 	 */
790 	priv = NULL;
791 	for (i = 0; i < hvc_iucv_devices; i++) {
792 		tmp = hvc_iucv_table[i];
793 		if (!tmp)
794 			continue;
795 
796 		if (find_unused) {
797 			spin_lock(&tmp->lock);
798 			if (tmp->iucv_state == IUCV_DISCONN)
799 				priv = tmp;
800 			spin_unlock(&tmp->lock);
801 
802 		} else if (!memcmp(tmp->srv_name, ipuser, 8))
803 				priv = tmp;
804 		if (priv)
805 			break;
806 	}
807 	if (!priv)
808 		return -ENODEV;
809 
810 	/* Enforce that ipvmid is allowed to connect to us */
811 	read_lock(&hvc_iucv_filter_lock);
812 	rc = hvc_iucv_filter_connreq(ipvmid);
813 	read_unlock(&hvc_iucv_filter_lock);
814 	if (rc) {
815 		iucv_path_sever(path, ipuser);
816 		iucv_path_free(path);
817 		memcpy(vm_user_id, ipvmid, 8);
818 		vm_user_id[8] = 0;
819 		pr_info("A connection request from z/VM user ID %s "
820 			"was refused\n", vm_user_id);
821 		return 0;
822 	}
823 
824 	spin_lock(&priv->lock);
825 
826 	/* If the terminal is already connected or being severed, then sever
827 	 * this path to enforce that there is only ONE established communication
828 	 * path per terminal. */
829 	if (priv->iucv_state != IUCV_DISCONN) {
830 		iucv_path_sever(path, ipuser);
831 		iucv_path_free(path);
832 		goto out_path_handled;
833 	}
834 
835 	/* accept path */
836 	memcpy(nuser_data, ipuser + 8, 8);  /* remote service (for af_iucv) */
837 	memcpy(nuser_data + 8, ipuser, 8);  /* local service  (for af_iucv) */
838 	path->msglim = 0xffff;		    /* IUCV MSGLIMIT */
839 	path->flags &= ~IUCV_IPRMDATA;	    /* TODO: use IUCV_IPRMDATA */
840 	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
841 	if (rc) {
842 		iucv_path_sever(path, ipuser);
843 		iucv_path_free(path);
844 		goto out_path_handled;
845 	}
846 	priv->path = path;
847 	priv->iucv_state = IUCV_CONNECTED;
848 
849 	/* store path information */
850 	memcpy(priv->info_path, ipvmid, 8);
851 	memcpy(priv->info_path + 8, ipuser + 8, 8);
852 
853 	/* flush buffered output data... */
854 	schedule_delayed_work(&priv->sndbuf_work, 5);
855 
856 out_path_handled:
857 	spin_unlock(&priv->lock);
858 	return 0;
859 }
860 
861 /**
862  * hvc_iucv_path_severed() - IUCV handler to process a path sever.
863  * @path:	Pending path (struct iucv_path)
864  * @ipuser:	User specified data for this path
865  *		(AF_IUCV: port/service name and originator port)
866  *
867  * This function calls the hvc_iucv_hangup() function for the
868  * respective IUCV HVC terminal.
869  *
870  * Locking:	struct hvc_iucv_private->lock
871  */
872 static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
873 {
874 	struct hvc_iucv_private *priv = path->private;
875 
876 	hvc_iucv_hangup(priv);
877 }
878 
879 /**
880  * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
881  * @path:	Pending path (struct iucv_path)
882  * @msg:	Pointer to the IUCV message
883  *
884  * The function puts an incoming message on the input queue for later
885  * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
886  * If the tty has not yet been opened, the message is rejected.
887  *
888  * Locking:	struct hvc_iucv_private->lock
889  */
890 static void hvc_iucv_msg_pending(struct iucv_path *path,
891 				 struct iucv_message *msg)
892 {
893 	struct hvc_iucv_private *priv = path->private;
894 	struct iucv_tty_buffer *rb;
895 
896 	/* reject messages that exceed max size of iucv_tty_msg->datalen */
897 	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
898 		iucv_message_reject(path, msg);
899 		return;
900 	}
901 
902 	spin_lock(&priv->lock);
903 
904 	/* reject messages if tty has not yet been opened */
905 	if (priv->tty_state == TTY_CLOSED) {
906 		iucv_message_reject(path, msg);
907 		goto unlock_return;
908 	}
909 
910 	/* allocate tty buffer to save iucv msg only */
911 	rb = alloc_tty_buffer(0, GFP_ATOMIC);
912 	if (!rb) {
913 		iucv_message_reject(path, msg);
914 		goto unlock_return;	/* -ENOMEM */
915 	}
916 	rb->msg = *msg;
917 
918 	list_add_tail(&rb->list, &priv->tty_inqueue);
919 
920 	hvc_kick();	/* wake up hvc thread */
921 
922 unlock_return:
923 	spin_unlock(&priv->lock);
924 }
925 
926 /**
927  * hvc_iucv_msg_complete() - IUCV handler to process message completion
928  * @path:	Pending path (struct iucv_path)
929  * @msg:	Pointer to the IUCV message
930  *
931  * The function is called upon completion of message delivery to remove the
932  * message from the outqueue. Additional delivery information can be found in
933  * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
934  *	       purged messages	 (0x010000 (IPADPGNR)).
935  *
936  * Locking:	struct hvc_iucv_private->lock
937  */
938 static void hvc_iucv_msg_complete(struct iucv_path *path,
939 				  struct iucv_message *msg)
940 {
941 	struct hvc_iucv_private *priv = path->private;
942 	struct iucv_tty_buffer	*ent, *next;
943 	LIST_HEAD(list_remove);
944 
945 	spin_lock(&priv->lock);
946 	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
947 		if (ent->msg.id == msg->id) {
948 			list_move(&ent->list, &list_remove);
949 			break;
950 		}
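	/* wake up flush_sndbuf_sync(), which may wait for the outqueue to drain */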
951 	wake_up(&priv->sndbuf_waitq);
952 	spin_unlock(&priv->lock);
953 	destroy_tty_buffer_list(&list_remove);
954 }
955 
956 /**
957  * hvc_iucv_pm_freeze() - Freeze PM callback
958  * @dev:	IUCV HVC terminal device
959  *
960  * Sever an established IUCV communication path and
961  * trigger a hang-up of the underlying HVC terminal.
962  */
963 static int hvc_iucv_pm_freeze(struct device *dev)
964 {
965 	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
966 
967 	local_bh_disable();
968 	hvc_iucv_hangup(priv);
969 	local_bh_enable();
970 
971 	return 0;
972 }
973 
974 /**
975  * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback
976  * @dev:	IUCV HVC terminal device
977  *
978  * Wake up the HVC thread to trigger hang-up and respective
979  * HVC back-end notifier invocations.
980  */
981 static int hvc_iucv_pm_restore_thaw(struct device *dev)
982 {
983 	hvc_kick();
984 	return 0;
985 }
986 
987 static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
988 					struct device_attribute *attr,
989 					char *buf)
990 {
991 	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
992 	size_t len;
993 
994 	len = sizeof(priv->srv_name);
995 	memcpy(buf, priv->srv_name, len);
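	/* srv_name is stored in EBCDIC; convert the copy to ASCII for sysfs */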
996 	EBCASC(buf, len);
997 	buf[len++] = '\n';
998 	return len;
999 }
1000 
1001 static ssize_t hvc_iucv_dev_state_show(struct device *dev,
1002 					struct device_attribute *attr,
1003 					char *buf)
1004 {
1005 	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1006 	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
1007 }
1008 
1009 static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
1010 				      struct device_attribute *attr,
1011 				      char *buf)
1012 {
1013 	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
1014 	char vmid[9], ipuser[9];
1015 
1016 	memset(vmid, 0, sizeof(vmid));
1017 	memset(ipuser, 0, sizeof(ipuser));
1018 
1019 	spin_lock_bh(&priv->lock);
1020 	if (priv->iucv_state == IUCV_CONNECTED) {
1021 		memcpy(vmid, priv->info_path, 8);
1022 		memcpy(ipuser, priv->info_path + 8, 8);
1023 	}
1024 	spin_unlock_bh(&priv->lock);
1025 	EBCASC(ipuser, 8);
1026 
1027 	return sprintf(buf, "%s:%s\n", vmid, ipuser);
1028 }
1029 
1030 
1031 /* HVC operations */
1032 static const struct hv_ops hvc_iucv_ops = {
1033 	.get_chars = hvc_iucv_get_chars,
1034 	.put_chars = hvc_iucv_put_chars,
1035 	.notifier_add = hvc_iucv_notifier_add,
1036 	.notifier_del = hvc_iucv_notifier_del,
1037 	.notifier_hangup = hvc_iucv_notifier_hangup,
1038 	.dtr_rts = hvc_iucv_dtr_rts,
1039 };
1040 
1041 /* Suspend / resume device operations */
1042 static const struct dev_pm_ops hvc_iucv_pm_ops = {
1043 	.freeze	  = hvc_iucv_pm_freeze,
1044 	.thaw	  = hvc_iucv_pm_restore_thaw,
1045 	.restore  = hvc_iucv_pm_restore_thaw,
1046 };
1047 
1048 /* IUCV HVC device driver */
1049 static struct device_driver hvc_iucv_driver = {
1050 	.name = KMSG_COMPONENT,
1051 	.bus  = &iucv_bus,
1052 	.pm   = &hvc_iucv_pm_ops,
1053 };
1054 
1055 /* IUCV HVC device attributes */
1056 static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
1057 static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
1058 static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
1059 static struct attribute *hvc_iucv_dev_attrs[] = {
1060 	&dev_attr_termid.attr,
1061 	&dev_attr_state.attr,
1062 	&dev_attr_peer.attr,
1063 	NULL,
1064 };
1065 static struct attribute_group hvc_iucv_dev_attr_group = {
1066 	.attrs = hvc_iucv_dev_attrs,
1067 };
1068 static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
1069 	&hvc_iucv_dev_attr_group,
1070 	NULL,
1071 };
1072 
1073 
1074 /**
1075  * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
1076  * @id:			hvc_iucv_table index
1077  * @is_console:		Flag indicating whether the instance is used as the Linux console
1078  *
1079  * This function allocates a new hvc_iucv_private structure and stores
1080  * the instance in hvc_iucv_table at index @id.
1081  * Returns 0 on success; otherwise non-zero.
1082  */
1083 static int __init hvc_iucv_alloc(int id, unsigned int is_console)
1084 {
1085 	struct hvc_iucv_private *priv;
1086 	char name[9];
1087 	int rc;
1088 
1089 	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
1090 	if (!priv)
1091 		return -ENOMEM;
1092 
1093 	spin_lock_init(&priv->lock);
1094 	INIT_LIST_HEAD(&priv->tty_outqueue);
1095 	INIT_LIST_HEAD(&priv->tty_inqueue);
1096 	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
1097 	init_waitqueue_head(&priv->sndbuf_waitq);
1098 
1099 	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
1100 	if (!priv->sndbuf) {
1101 		kfree(priv);
1102 		return -ENOMEM;
1103 	}
1104 
1105 	/* set console flag */
1106 	priv->is_console = is_console;
1107 
1108 	/* allocate hvc device */
1109 	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */
1110 			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
1111 	if (IS_ERR(priv->hvc)) {
1112 		rc = PTR_ERR(priv->hvc);
1113 		goto out_error_hvc;
1114 	}
1115 
1116 	/* notify HVC thread instead of using polling */
1117 	priv->hvc->irq_requested = 1;
1118 
1119 	/* setup iucv related information */
1120 	snprintf(name, 9, "lnxhvc%-2d", id);
1121 	memcpy(priv->srv_name, name, 8);
1122 	ASCEBC(priv->srv_name, 8);
1123 
1124 	/* create and setup device */
1125 	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
1126 	if (!priv->dev) {
1127 		rc = -ENOMEM;
1128 		goto out_error_dev;
1129 	}
1130 	dev_set_name(priv->dev, "hvc_iucv%d", id);
1131 	dev_set_drvdata(priv->dev, priv);
1132 	priv->dev->bus = &iucv_bus;
1133 	priv->dev->parent = iucv_root;
1134 	priv->dev->driver = &hvc_iucv_driver;
1135 	priv->dev->groups = hvc_iucv_dev_attr_groups;
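	/* the device structure is kzalloc'ed, so kfree() suffices as release function */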
1136 	priv->dev->release = (void (*)(struct device *)) kfree;
1137 	rc = device_register(priv->dev);
1138 	if (rc) {
1139 		put_device(priv->dev);
1140 		goto out_error_dev;
1141 	}
1142 
1143 	hvc_iucv_table[id] = priv;
1144 	return 0;
1145 
1146 out_error_dev:
1147 	hvc_remove(priv->hvc);
1148 out_error_hvc:
1149 	free_page((unsigned long) priv->sndbuf);
1150 	kfree(priv);
1151 
1152 	return rc;
1153 }
1154 
1155 /**
1156  * hvc_iucv_destroy() - Destroy and free a struct hvc_iucv_private instance
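 * @priv:	Pointer to the struct hvc_iucv_private instance to destroy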
1157  */
1158 static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
1159 {
1160 	hvc_remove(priv->hvc);
1161 	device_unregister(priv->dev);
1162 	free_page((unsigned long) priv->sndbuf);
1163 	kfree(priv);
1164 }
1165 
1166 /**
1167  * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
1168  * @filter:	String containing a comma-separated list of z/VM user IDs
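 * @dest:	Buffer that receives the 8-byte, blank-padded, upper-case z/VM user ID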
1169  */
1170 static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
1171 {
1172 	const char *nextdelim, *residual;
1173 	size_t len;
1174 
1175 	nextdelim = strchr(filter, ',');
1176 	if (nextdelim) {
1177 		len = nextdelim - filter;
1178 		residual = nextdelim + 1;
1179 	} else {
1180 		len = strlen(filter);
1181 		residual = filter + len;
1182 	}
1183 
1184 	if (len == 0)
1185 		return ERR_PTR(-EINVAL);
1186 
1187 	/* check for '\n' (if called from sysfs) */
1188 	if (filter[len - 1] == '\n')
1189 		len--;
1190 
1191 	if (len > 8)
1192 		return ERR_PTR(-EINVAL);
1193 
1194 	/* pad with blanks and save upper case version of user ID */
1195 	memset(dest, ' ', 8);
1196 	while (len--)
1197 		dest[len] = toupper(filter[len]);
1198 	return residual;
1199 }
1200 
1201 /**
1202  * hvc_iucv_setup_filter() - Set up z/VM user ID filter
1203  * @filter:	String consisting of a comma-separated list of z/VM user IDs
1204  *
1205  * The function parses the @filter string and creates an array containing
1206  * the list of z/VM user ID filter entries.
1207  * Return code 0 means success, -EINVAL if the filter is syntactically
1208  * incorrect, -ENOMEM if there was not enough memory to allocate the
1209  * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
1210  */
1211 static int hvc_iucv_setup_filter(const char *val)
1212 {
1213 	const char *residual;
1214 	int err;
1215 	size_t size, count;
1216 	void *array, *old_filter;
1217 
1218 	count = strlen(val);
1219 	if (count == 0 || (count == 1 && val[0] == '\n')) {
1220 		size  = 0;
1221 		array = NULL;
1222 		goto out_replace_filter;	/* clear filter */
1223 	}
1224 
1225 	/* count user IDs in order to allocate sufficient memory */
1226 	size = 1;
1227 	residual = val;
1228 	while ((residual = strchr(residual, ',')) != NULL) {
1229 		residual++;
1230 		size++;
1231 	}
1232 
1233 	/* check if the specified list exceeds the filter limit */
1234 	if (size > MAX_VMID_FILTER)
1235 		return -ENOSPC;
1236 
1237 	array = kzalloc(size * 8, GFP_KERNEL);
1238 	if (!array)
1239 		return -ENOMEM;
1240 
1241 	count = size;
1242 	residual = val;
1243 	while (*residual && count) {
1244 		residual = hvc_iucv_parse_filter(residual,
1245 						 array + ((size - count) * 8));
1246 		if (IS_ERR(residual)) {
1247 			err = PTR_ERR(residual);
1248 			kfree(array);
1249 			goto out_err;
1250 		}
1251 		count--;
1252 	}
1253 
1254 out_replace_filter:
1255 	write_lock_bh(&hvc_iucv_filter_lock);
1256 	old_filter = hvc_iucv_filter;
1257 	hvc_iucv_filter_size = size;
1258 	hvc_iucv_filter = array;
1259 	write_unlock_bh(&hvc_iucv_filter_lock);
1260 	kfree(old_filter);
1261 
1262 	err = 0;
1263 out_err:
1264 	return err;
1265 }
1266 
1267 /**
1268  * param_set_vmidfilter() - Set z/VM user ID filter parameter
1269  * @val:	String consisting of a comma-separated list of z/VM user IDs
1270  * @kp:		Kernel parameter pointing to hvc_iucv_filter array
1271  *
1272  * The function sets up the z/VM user ID filter specified as comma-separated
1273  * list of user IDs in @val.
1274  * Note: If it is called early in the boot process, @val is stored and
1275  *	 parsed later in hvc_iucv_init().
1276  */
1277 static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
1278 {
1279 	int rc;
1280 
1281 	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1282 		return -ENODEV;
1283 
1284 	if (!val)
1285 		return -EINVAL;
1286 
1287 	rc = 0;
1288 	if (slab_is_available())
1289 		rc = hvc_iucv_setup_filter(val);
1290 	else
1291 		hvc_iucv_filter_string = val;	/* defer... */
1292 	return rc;
1293 }
1294 
1295 /**
1296  * param_get_vmidfilter() - Get z/VM user ID filter
1297  * @buffer:	Buffer to store the z/VM user ID filter
1298  *		(buffer size assumption: PAGE_SIZE)
1299  * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
1300  *
1301  * The function stores the filter as a comma-separated list of z/VM user IDs
1302  * in @buffer. Typically, sysfs routines call this function for attr show.
1303  */
1304 static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
1305 {
1306 	int rc;
1307 	size_t index, len;
1308 	void *start, *end;
1309 
1310 	if (!MACHINE_IS_VM || !hvc_iucv_devices)
1311 		return -ENODEV;
1312 
1313 	rc = 0;
1314 	read_lock_bh(&hvc_iucv_filter_lock);
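	/* each filter entry is an 8-byte, blank-padded z/VM user ID; copy each ID
	 * without its trailing blanks and separate the IDs with commas */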
1315 	for (index = 0; index < hvc_iucv_filter_size; index++) {
1316 		start = hvc_iucv_filter + (8 * index);
1317 		end   = memchr(start, ' ', 8);
1318 		len   = (end) ? end - start : 8;
1319 		memcpy(buffer + rc, start, len);
1320 		rc += len;
1321 		buffer[rc++] = ',';
1322 	}
1323 	read_unlock_bh(&hvc_iucv_filter_lock);
1324 	if (rc)
1325 		buffer[--rc] = '\0';	/* replace last comma and update rc */
1326 	return rc;
1327 }
1328 
1329 #define param_check_vmidfilter(name, p) __param_check(name, p, void)
1330 
1331 static struct kernel_param_ops param_ops_vmidfilter = {
1332 	.set = param_set_vmidfilter,
1333 	.get = param_get_vmidfilter,
1334 };
1335 
1336 /**
1337  * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
1338  */
1339 static int __init hvc_iucv_init(void)
1340 {
1341 	int rc;
1342 	unsigned int i;
1343 
1344 	if (!hvc_iucv_devices)
1345 		return -ENODEV;
1346 
1347 	if (!MACHINE_IS_VM) {
1348 		pr_notice("The z/VM IUCV HVC device driver cannot "
1349 			   "be used without z/VM\n");
1350 		rc = -ENODEV;
1351 		goto out_error;
1352 	}
1353 
1354 	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
1355 		pr_err("%lu is not a valid value for the hvc_iucv= "
1356 			"kernel parameter\n", hvc_iucv_devices);
1357 		rc = -EINVAL;
1358 		goto out_error;
1359 	}
1360 
1361 	/* register IUCV HVC device driver */
1362 	rc = driver_register(&hvc_iucv_driver);
1363 	if (rc)
1364 		goto out_error;
1365 
1366 	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
1367 	if (hvc_iucv_filter_string) {
1368 		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
1369 		switch (rc) {
1370 		case 0:
1371 			break;
1372 		case -ENOMEM:
1373 			pr_err("Allocating memory failed with "
1374 				"reason code=%d\n", 3);
1375 			goto out_error;
1376 		case -EINVAL:
1377 			pr_err("hvc_iucv_allow= does not specify a valid "
1378 				"z/VM user ID list\n");
1379 			goto out_error;
1380 		case -ENOSPC:
1381 			pr_err("hvc_iucv_allow= specifies too many "
1382 				"z/VM user IDs\n");
1383 			goto out_error;
1384 		default:
1385 			goto out_error;
1386 		}
1387 	}
1388 
1389 	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
1390 					   sizeof(struct iucv_tty_buffer),
1391 					   0, 0, NULL);
1392 	if (!hvc_iucv_buffer_cache) {
1393 		pr_err("Allocating memory failed with reason code=%d\n", 1);
1394 		rc = -ENOMEM;
1395 		goto out_error;
1396 	}
1397 
1398 	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
1399 						    hvc_iucv_buffer_cache);
1400 	if (!hvc_iucv_mempool) {
1401 		pr_err("Allocating memory failed with reason code=%d\n", 2);
1402 		kmem_cache_destroy(hvc_iucv_buffer_cache);
1403 		rc = -ENOMEM;
1404 		goto out_error;
1405 	}
1406 
1407 	/* register the first terminal device as console
1408 	 * (must be done before allocating hvc terminal devices) */
1409 	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
1410 	if (rc) {
1411 		pr_err("Registering HVC terminal device as "
1412 		       "Linux console failed\n");
1413 		goto out_error_memory;
1414 	}
1415 
1416 	/* allocate hvc_iucv_private structs */
1417 	for (i = 0; i < hvc_iucv_devices; i++) {
1418 		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
1419 		if (rc) {
1420 			pr_err("Creating a new HVC terminal device "
1421 				"failed with error code=%d\n", rc);
1422 			goto out_error_hvc;
1423 		}
1424 	}
1425 
1426 	/* register IUCV callback handler */
1427 	rc = iucv_register(&hvc_iucv_handler, 0);
1428 	if (rc) {
1429 		pr_err("Registering IUCV handlers failed with error code=%d\n",
1430 			rc);
1431 		goto out_error_hvc;
1432 	}
1433 
1434 	return 0;
1435 
1436 out_error_hvc:
1437 	for (i = 0; i < hvc_iucv_devices; i++)
1438 		if (hvc_iucv_table[i])
1439 			hvc_iucv_destroy(hvc_iucv_table[i]);
1440 out_error_memory:
1441 	mempool_destroy(hvc_iucv_mempool);
1442 	kmem_cache_destroy(hvc_iucv_buffer_cache);
1443 out_error:
1444 	kfree(hvc_iucv_filter);
1445 	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
1446 	return rc;
1447 }
1448 
1449 /**
1450  * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
1451  * @val:	Parameter value (numeric)
1452  */
1453 static	int __init hvc_iucv_config(char *val)
1454 {
1455 	 return kstrtoul(val, 10, &hvc_iucv_devices);
1456 }
1457 
1458 
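/* "hvc_iucv=" specifies the number of terminal devices (early kernel parameter);
 * "hvc_iucv_allow=" specifies the z/VM user ID filter */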
1459 device_initcall(hvc_iucv_init);
1460 __setup("hvc_iucv=", hvc_iucv_config);
1461 core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);
1462