xref: /linux/drivers/s390/net/netiucv.c (revision 96ac6d435100450f0565708d9b885ea2a7400e0a)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * IUCV network driver
4  *
5  * Copyright IBM Corp. 2001, 2009
6  *
7  * Author(s):
8  *	Original netiucv driver:
9  *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
10  *	Sysfs integration and all bugs therein:
11  *		Cornelia Huck (cornelia.huck@de.ibm.com)
12  *	PM functions:
13  *		Ursula Braun (ursula.braun@de.ibm.com)
14  *
15  * Documentation used:
16  *  the source of the original IUCV driver by:
17  *    Stefan Hegewald <hegewald@de.ibm.com>
18  *    Hartmut Penner <hpenner@de.ibm.com>
19  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
20  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
21  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
22  */
23 
24 #define KMSG_COMPONENT "netiucv"
25 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26 
27 #undef DEBUG
28 
29 #include <linux/module.h>
30 #include <linux/init.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/errno.h>
34 #include <linux/types.h>
35 #include <linux/interrupt.h>
36 #include <linux/timer.h>
37 #include <linux/bitops.h>
38 
39 #include <linux/signal.h>
40 #include <linux/string.h>
41 #include <linux/device.h>
42 
43 #include <linux/ip.h>
44 #include <linux/if_arp.h>
45 #include <linux/tcp.h>
46 #include <linux/skbuff.h>
47 #include <linux/ctype.h>
48 #include <net/dst.h>
49 
50 #include <asm/io.h>
51 #include <linux/uaccess.h>
52 #include <asm/ebcdic.h>
53 
54 #include <net/iucv/iucv.h>
55 #include "fsm.h"
56 
/* Module metadata reported by modinfo(8). */
MODULE_AUTHOR
    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
60 
/*
 * Debug Facility stuff
 *
 * Three s390 debug-feature areas are used: "setup" for device/connection
 * setup, "data" for packet handling and "trace" for call tracing.
 * For each area, *_LEN is the size of one debug entry, *_PAGES the area
 * size, *_NR_AREAS the number of areas and *_LEVEL the initial level.
 */
#define IUCV_DBF_SETUP_NAME "iucv_setup"
#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3

#define IUCV_DBF_DATA_NAME "iucv_data"
#define IUCV_DBF_DATA_LEN 128
#define IUCV_DBF_DATA_PAGES 2
#define IUCV_DBF_DATA_NR_AREAS 1
#define IUCV_DBF_DATA_LEVEL 2

#define IUCV_DBF_TRACE_NAME "iucv_trace"
#define IUCV_DBF_TRACE_LEN 16
#define IUCV_DBF_TRACE_PAGES 4
#define IUCV_DBF_TRACE_NR_AREAS 1
#define IUCV_DBF_TRACE_LEVEL 3
81 
/* Log a fixed text string into the given dbf area. */
#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

/* Log a hex dump of @len bytes at @addr into the given dbf area. */
#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

/* Per-CPU scratch buffer for IUCV_DBF_TEXT_ (defined further below). */
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/*
 * Log a printf-style formatted message.  The message is rendered into a
 * per-CPU buffer first, so preemption is disabled via get_cpu_var() while
 * the buffer is in use.
 */
#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (debug_level_enabled(iucv_dbf_##name, level)) { \
			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(__buf, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)
103 
/*
 * Emit a printf-style message into the "trace" dbf area.
 *
 * The previous version expanded to TWO debug_sprintf_event() calls -- one
 * using the GNU "##text" form and one using plain "text" -- so every
 * message was logged twice, and the plain form failed to compile for an
 * empty variadic list.  Keep only the "##text" call, which swallows the
 * leading comma when no arguments follow the format string.
 */
#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
	} while (0)
109 
110 /**
111  * some more debug stuff
112  */
#define PRINTK_HEADER " iucv: "       /* for debugging */

/* dummy device to make sure netiucv_pm functions are called */
static struct device *netiucv_dev;

/* Power-management callbacks; implementations follow later in the file. */
static int netiucv_pm_prepare(struct device *);
static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);

/* Both thaw and restore re-establish connections the same way. */
static const struct dev_pm_ops netiucv_pm_ops = {
	.prepare = netiucv_pm_prepare,
	.complete = netiucv_pm_complete,
	.freeze = netiucv_pm_freeze,
	.thaw = netiucv_pm_restore_thaw,
	.restore = netiucv_pm_restore_thaw,
};
130 
/* Driver registered on the IUCV bus; carries the PM callbacks above. */
static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name = "netiucv",
	.bus  = &iucv_bus,
	.pm = &netiucv_pm_ops,
};
137 
/* Low-level IUCV callback wrappers; implementations follow below. */
static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void netiucv_callback_connack(struct iucv_path *, u8 *);
static void netiucv_callback_connrej(struct iucv_path *, u8 *);
static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
static void netiucv_callback_connres(struct iucv_path *, u8 *);
static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);

/* Handler registered with the IUCV base layer for all netiucv paths. */
static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};
155 
/*
 * Per connection profiling data (exposed via sysfs).
 */
struct connection_profile {
	unsigned long maxmulti;       /* largest collected multi-send batch */
	unsigned long maxcqueue;      /* max packets coalesced in one send  */
	unsigned long doios_single;   /* number of single-skb sends         */
	unsigned long doios_multi;    /* number of multi-skb sends          */
	unsigned long txlen;          /* total bytes handed to IUCV         */
	unsigned long tx_time;        /* accumulated transmit time          */
	unsigned long send_stamp;     /* jiffies stamp of last send         */
	unsigned long tx_pending;     /* sends awaiting completion          */
	unsigned long tx_max_pending; /* high-water mark of tx_pending      */
};
170 
/*
 * Representation of one iucv connection
 */
struct iucv_connection {
	struct list_head	  list;           /* on iucv_connection_list */
	struct iucv_path	  *path;          /* underlying IUCV path */
	struct sk_buff            *rx_buff;       /* reassembly buffer for rx */
	struct sk_buff            *tx_buff;       /* staging buffer for tx */
	struct sk_buff_head       collect_queue;  /* skbs waiting to be sent */
	struct sk_buff_head	  commit_queue;   /* skbs sent, awaiting ack */
	spinlock_t                collect_lock;   /* protects collect_queue/len */
	int                       collect_len;    /* bytes in collect_queue */
	int                       max_buffsize;   /* rx/tx buffer size limit */
	fsm_timer                 timer;          /* connection setup timeout */
	fsm_instance              *fsm;           /* connection statemachine */
	struct net_device         *netdev;        /* owning network device */
	struct connection_profile prof;           /* statistics, see above */
	char                      userid[9];      /* peer z/VM user id (EBCDIC) */
	char			  userdata[17];   /* IUCV user data (EBCDIC) */
};
191 
/*
 * Linked list of all connection structs, guarded by the rwlock below.
 */
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);

/*
 * Representation of event-data for the
 * connection state machine.
 */
struct iucv_event {
	struct iucv_connection *conn;  /* connection the event refers to */
	void                   *data;  /* event payload (path or message) */
};
206 
/*
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;     /* interface statistics */
	unsigned long           tbusy;     /* bit 0: transmit busy flag */
	fsm_instance            *fsm;      /* interface statemachine */
        struct iucv_connection  *conn;     /* the one connection of this dev */
	struct device           *dev;      /* sysfs device */
	int			 pm_state; /* device state saved across PM ops */
};
218 
/*
 * Link level header for a packet: offset of the next packet within the
 * transfer buffer; 0 terminates the packet chain.
 */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 65537
#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT      9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC     5000
233 
234 /**
235  * Compatibility macros for busy handling
236  * of network devices.
237  */
238 static void netiucv_clear_busy(struct net_device *dev)
239 {
240 	struct netiucv_priv *priv = netdev_priv(dev);
241 	clear_bit(0, &priv->tbusy);
242 	netif_wake_queue(dev);
243 }
244 
245 static int netiucv_test_and_set_busy(struct net_device *dev)
246 {
247 	struct netiucv_priv *priv = netdev_priv(dev);
248 	netif_stop_queue(dev);
249 	return test_and_set_bit(0, &priv->tbusy);
250 }
251 
/* Default IUCV user data: the 16 characters "0       0       " in ASCII. */
static u8 iucvMagic_ascii[16] = {
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};

/* The same default user data in EBCDIC, as it appears on the wire. */
static u8 iucvMagic_ebcdic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
261 
/**
 * Convert an iucv userId to its printable form: copy at most @len
 * characters and truncate at the first whitespace character.
 *
 * @param name  An iucv userId (need not be NUL terminated)
 * @param len   Number of characters to consider (at most 16)
 *
 * @returns The printable string (static data -- valid only until the
 *          next call, not reentrant)
 */
static char *netiucv_printname(char *name, int len)
{
	static char tmp[17];
	int i;

	memcpy(tmp, name, len);
	tmp[len] = '\0';
	/* Cut the copy short at the first whitespace character, if any. */
	for (i = 0; i < len && tmp[i] && !isspace(tmp[i]); i++)
		;
	tmp[i] = '\0';
	return tmp;
}
281 
282 static char *netiucv_printuser(struct iucv_connection *conn)
283 {
284 	static char tmp_uid[9];
285 	static char tmp_udat[17];
286 	static char buf[100];
287 
288 	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
289 		tmp_uid[8] = '\0';
290 		tmp_udat[16] = '\0';
291 		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
292 		memcpy(tmp_udat, conn->userdata, 16);
293 		EBCASC(tmp_udat, 16);
294 		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
295 		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
296 		return buf;
297 	} else
298 		return netiucv_printname(conn->userid, 8);
299 }
300 
/*
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

/* Printable names, indexed by enum dev_states. */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};
321 
/*
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,
	DEV_EVENT_CONDOWN,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

/* Printable names, indexed by enum dev_events. */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};
342 
/*
 * Events of the connection statemachine
 */
enum conn_events {
	/**
	 * Events, representing callbacks from
	 * lowlevel iucv layer)
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,

	/**
	 * Events, representing errors return codes from
	 * calls to lowlevel iucv layer
	 */

	/**
	 * Event, representing timer expiry.
	 */
	CONN_EVENT_TIMER,

	/**
	 * Events, representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_EVENTS,
};

/* Printable names, indexed by enum conn_events. */
static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",

	"Timer",

	"Start",
	"Stop",
};
395 
/*
 * States of the connection statemachine.
 */
enum conn_states {
	/**
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,

	/**
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,

	/**
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/**
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/**
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,

	/**
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,

	/**
	 * Error during registration.
	 */
	CONN_STATE_REGERR,

	/**
	 * Error during connection setup.
	 */
	CONN_STATE_CONNERR,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_STATES,
};
449 
/*
 * Printable names, indexed by enum conn_states.
 *
 * The previous table carried a stale "Terminating" entry (leftover from a
 * removed state), which left the table one entry longer than the enum and
 * shifted the names for CONN_STATE_REGERR and CONN_STATE_CONNERR off by
 * one -- REGERR printed as "Terminating" and CONNERR as "Registration
 * error".  The entry is dropped so names align with the enum again.
 */
static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Registration error",
	"Connect error",
};
461 

/*
 * Debug Facility Stuff
 */
/* dbf areas; allocated in iucv_register_dbf_views(). */
static debug_info_t *iucv_dbf_setup = NULL;
static debug_info_t *iucv_dbf_data = NULL;
static debug_info_t *iucv_dbf_trace = NULL;

/* Per-CPU scratch buffer used by the IUCV_DBF_TEXT_ macro. */
DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
471 
472 static void iucv_unregister_dbf_views(void)
473 {
474 	debug_unregister(iucv_dbf_setup);
475 	debug_unregister(iucv_dbf_data);
476 	debug_unregister(iucv_dbf_trace);
477 }
/*
 * Allocate the three dbf areas, attach a hex/ascii view to each and set
 * the initial debug levels.
 *
 * Returns 0 on success or -ENOMEM if any allocation failed (in which
 * case all areas are torn down again).
 */
static int iucv_register_dbf_views(void)
{
	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
					IUCV_DBF_SETUP_PAGES,
					IUCV_DBF_SETUP_NR_AREAS,
					IUCV_DBF_SETUP_LEN);
	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
				       IUCV_DBF_DATA_PAGES,
				       IUCV_DBF_DATA_NR_AREAS,
				       IUCV_DBF_DATA_LEN);
	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
					IUCV_DBF_TRACE_PAGES,
					IUCV_DBF_TRACE_NR_AREAS,
					IUCV_DBF_TRACE_LEN);

	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
	    (iucv_dbf_trace == NULL)) {
		iucv_unregister_dbf_views();
		return -ENOMEM;
	}
	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);

	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);

	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);

	return 0;
}
509 
510 /*
511  * Callback-wrappers, called from lowlevel iucv layer.
512  */
513 
514 static void netiucv_callback_rx(struct iucv_path *path,
515 				struct iucv_message *msg)
516 {
517 	struct iucv_connection *conn = path->private;
518 	struct iucv_event ev;
519 
520 	ev.conn = conn;
521 	ev.data = msg;
522 	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
523 }
524 
525 static void netiucv_callback_txdone(struct iucv_path *path,
526 				    struct iucv_message *msg)
527 {
528 	struct iucv_connection *conn = path->private;
529 	struct iucv_event ev;
530 
531 	ev.conn = conn;
532 	ev.data = msg;
533 	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
534 }
535 
536 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
537 {
538 	struct iucv_connection *conn = path->private;
539 
540 	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
541 }
542 
/*
 * A remote guest requests a connection.  Find the matching registered
 * connection (by userid and userdata) and forward a CONN_REQ event to its
 * FSM; return 0 if a match was found, -EINVAL otherwise so the IUCV base
 * layer refuses the path.
 *
 * NOTE(review): tmp_user/tmp_udat are function-level statics used only
 * for the debug message; concurrent path_pending callbacks would race on
 * them -- presumably callbacks are serialized by the iucv layer, confirm.
 */
static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
				    u8 *ipuser)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	static char tmp_user[9];
	static char tmp_udat[17];
	int rc;

	rc = -EINVAL;
	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
	memcpy(tmp_udat, ipuser, 16);
	EBCASC(tmp_udat, 16);
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8) ||
		    strncmp(ipuser, conn->userdata, 16))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		/* NOTE(review): the loop keeps scanning after a match
		 * instead of breaking -- confirm whether duplicates are
		 * possible/intended. */
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
		       tmp_user, netiucv_printname(tmp_udat, 16));
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}
573 
574 static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
575 {
576 	struct iucv_connection *conn = path->private;
577 
578 	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
579 }
580 
581 static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
582 {
583 	struct iucv_connection *conn = path->private;
584 
585 	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
586 }
587 
588 static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
589 {
590 	struct iucv_connection *conn = path->private;
591 
592 	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
593 }
594 
/*
 * NOP action for statemachines: used in FSM tables for state/event
 * combinations that deliberately require no processing.
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
601 
602 /*
603  * Actions of the connection statemachine
604  */
605 
/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * Helper function for conn_action_rx.
 *
 * The receive buffer holds a chain of packets, each preceded by a
 * struct ll_header whose "next" field is the cumulative offset of the
 * following packet; a zero "next" terminates the chain.  Each packet is
 * copied into a freshly allocated skb and passed to the network stack.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device     *dev = conn->netdev;
	struct netiucv_priv   *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	pskb->protocol = cpu_to_be16(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		/* A zero "next" offset marks the end of the packet chain. */
		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		/* Convert the cumulative offset into this packet's length. */
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): this marks the *source* buffer (pskb), not
		 * the skb just built -- looks like it was meant for skb;
		 * confirm before changing. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		/* Advance to the next packet in the chain. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
668 
/*
 * Connection FSM action for CONN_EVENT_RX: receive the pending IUCV
 * message into the connection's rx buffer and unpack it.  Messages for
 * unlinked connections or exceeding max_buffsize are rejected.
 */
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	/* Reset the reusable rx buffer to an empty state before receiving. */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	/* NOTE(review): "< 5" presumably means "too short to hold a header
	 * plus any payload"; confirm against the ll_header framing. */
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
704 
/*
 * Connection FSM action for CONN_EVENT_TXDONE: a previous send has
 * completed.  Account the finished skb (if it was a single send, flagged
 * via msg->tag), then drain the collect queue into the tx buffer and,
 * if anything was gathered, start the next (multi-packet) send.
 */
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn || !conn->netdev) {
		IUCV_DBF_TEXT(data, 2,
			      "Send confirmation for unlinked connection\n");
		return;
	}
	privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		/* The completed message was a single skb: account it and
		 * release the reference taken when it was committed. */
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			refcount_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	/* Reset the reusable tx buffer before packing the next batch. */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		/* Each packet is prefixed with the cumulative offset of
		 * the following packet (see netiucv_unpack_skb). */
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		/* Nothing collected: the connection goes idle. */
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	/* Terminate the packet chain and send the batch. */
	header.next = 0;
	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = jiffies;
	txmsg.class = 0;
	txmsg.tag = 0;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}
794 
/*
 * Connection FSM action for CONN_EVENT_CONN_REQ in STARTWAIT/SETUPWAIT:
 * accept the incoming path.  On success the connection goes IDLE and the
 * device FSM is told the connection is up; on failure the path is left
 * untouched (the state does not change).
 */
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Size the device queue to what the path can have in flight. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
818 
819 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
820 {
821 	struct iucv_event *ev = arg;
822 	struct iucv_path *path = ev->data;
823 
824 	IUCV_DBF_TEXT(trace, 3, __func__);
825 	iucv_path_sever(path, NULL);
826 }
827 
828 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
829 {
830 	struct iucv_connection *conn = arg;
831 	struct net_device *netdev = conn->netdev;
832 	struct netiucv_priv *privptr = netdev_priv(netdev);
833 
834 	IUCV_DBF_TEXT(trace, 3, __func__);
835 	fsm_deltimer(&conn->timer);
836 	fsm_newstate(fi, CONN_STATE_IDLE);
837 	netdev->tx_queue_len = conn->path->msglim;
838 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
839 }
840 
841 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
842 {
843 	struct iucv_connection *conn = arg;
844 
845 	IUCV_DBF_TEXT(trace, 3, __func__);
846 	fsm_deltimer(&conn->timer);
847 	iucv_path_sever(conn->path, conn->userdata);
848 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
849 }
850 
851 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
852 {
853 	struct iucv_connection *conn = arg;
854 	struct net_device *netdev = conn->netdev;
855 	struct netiucv_priv *privptr = netdev_priv(netdev);
856 
857 	IUCV_DBF_TEXT(trace, 3, __func__);
858 
859 	fsm_deltimer(&conn->timer);
860 	iucv_path_sever(conn->path, conn->userdata);
861 	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
862 			       "connection\n", netiucv_printuser(conn));
863 	IUCV_DBF_TEXT(data, 2,
864 		      "conn_action_connsever: Remote dropped connection\n");
865 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
866 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
867 }
868 
869 static void conn_action_start(fsm_instance *fi, int event, void *arg)
870 {
871 	struct iucv_connection *conn = arg;
872 	struct net_device *netdev = conn->netdev;
873 	struct netiucv_priv *privptr = netdev_priv(netdev);
874 	int rc;
875 
876 	IUCV_DBF_TEXT(trace, 3, __func__);
877 
878 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
879 
880 	/*
881 	 * We must set the state before calling iucv_connect because the
882 	 * callback handler could be called at any point after the connection
883 	 * request is sent
884 	 */
885 
886 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
887 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
888 	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
889 		netdev->name, netiucv_printuser(conn));
890 
891 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
892 			       NULL, conn->userdata, conn);
893 	switch (rc) {
894 	case 0:
895 		netdev->tx_queue_len = conn->path->msglim;
896 		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
897 			     CONN_EVENT_TIMER, conn);
898 		return;
899 	case 11:
900 		dev_warn(privptr->dev,
901 			"The IUCV device failed to connect to z/VM guest %s\n",
902 			netiucv_printname(conn->userid, 8));
903 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
904 		break;
905 	case 12:
906 		dev_warn(privptr->dev,
907 			"The IUCV device failed to connect to the peer on z/VM"
908 			" guest %s\n", netiucv_printname(conn->userid, 8));
909 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
910 		break;
911 	case 13:
912 		dev_err(privptr->dev,
913 			"Connecting the IUCV device would exceed the maximum"
914 			" number of IUCV connections\n");
915 		fsm_newstate(fi, CONN_STATE_CONNERR);
916 		break;
917 	case 14:
918 		dev_err(privptr->dev,
919 			"z/VM guest %s has too many IUCV connections"
920 			" to connect with the IUCV device\n",
921 			netiucv_printname(conn->userid, 8));
922 		fsm_newstate(fi, CONN_STATE_CONNERR);
923 		break;
924 	case 15:
925 		dev_err(privptr->dev,
926 			"The IUCV device cannot connect to a z/VM guest with no"
927 			" IUCV authorization\n");
928 		fsm_newstate(fi, CONN_STATE_CONNERR);
929 		break;
930 	default:
931 		dev_err(privptr->dev,
932 			"Connecting the IUCV device failed with error %d\n",
933 			rc);
934 		fsm_newstate(fi, CONN_STATE_CONNERR);
935 		break;
936 	}
937 	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
938 	kfree(conn->path);
939 	conn->path = NULL;
940 }
941 
942 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
943 {
944 	struct sk_buff *skb;
945 
946 	while ((skb = skb_dequeue(q))) {
947 		refcount_dec(&skb->users);
948 		dev_kfree_skb_any(skb);
949 	}
950 }
951 
/*
 * Connection FSM action for CONN_EVENT_STOP: tear the connection down --
 * stop the setup timer, purge both skb queues, sever and free the path,
 * and tell the device FSM the connection is gone.
 */
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
973 
974 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
975 {
976 	struct iucv_connection *conn = arg;
977 	struct net_device *netdev = conn->netdev;
978 
979 	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
980 		netdev->name, conn->userid);
981 }
982 
/* State/event transition table for the connection statemachine. */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};
1014 
1015 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1016 
1017 
1018 /*
1019  * Actions for interface - statemachine.
1020  */
1021 
1022 /**
1023  * dev_action_start
1024  * @fi: An instance of an interface statemachine.
1025  * @event: The event, just happened.
1026  * @arg: Generic pointer, casted from struct net_device * upon call.
1027  *
1028  * Startup connection by sending CONN_EVENT_START to it.
1029  */
1030 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1031 {
1032 	struct net_device   *dev = arg;
1033 	struct netiucv_priv *privptr = netdev_priv(dev);
1034 
1035 	IUCV_DBF_TEXT(trace, 3, __func__);
1036 
1037 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1038 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1039 }
1040 
1041 /**
1042  * Shutdown connection by sending CONN_EVENT_STOP to it.
1043  *
1044  * @param fi    An instance of an interface statemachine.
1045  * @param event The event, just happened.
1046  * @param arg   Generic pointer, casted from struct net_device * upon call.
1047  */
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);
	/* CONN_EVENT_STOP is delivered wrapped in an iucv_event on the
	 * stack; the connection FSM consumes it synchronously. */
	struct iucv_event   ev;

	IUCV_DBF_TEXT(trace, 3, __func__);

	ev.conn = privptr->conn;

	fsm_newstate(fi, DEV_STATE_STOPWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
1062 
1063 /**
1064  * Called from connection statemachine
1065  * when a connection is up and running.
1066  *
1067  * @param fi    An instance of an interface statemachine.
1068  * @param event The event, just happened.
1069  * @param arg   Generic pointer, casted from struct net_device * upon call.
1070  */
1071 static void
1072 dev_action_connup(fsm_instance *fi, int event, void *arg)
1073 {
1074 	struct net_device   *dev = arg;
1075 	struct netiucv_priv *privptr = netdev_priv(dev);
1076 
1077 	IUCV_DBF_TEXT(trace, 3, __func__);
1078 
1079 	switch (fsm_getstate(fi)) {
1080 		case DEV_STATE_STARTWAIT:
1081 			fsm_newstate(fi, DEV_STATE_RUNNING);
1082 			dev_info(privptr->dev,
1083 				"The IUCV device has been connected"
1084 				" successfully to %s\n",
1085 				netiucv_printuser(privptr->conn));
1086 			IUCV_DBF_TEXT(setup, 3,
1087 				"connection is up and running\n");
1088 			break;
1089 		case DEV_STATE_STOPWAIT:
1090 			IUCV_DBF_TEXT(data, 2,
1091 				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1092 			break;
1093 	}
1094 }
1095 
1096 /**
1097  * Called from connection statemachine
1098  * when a connection has been shutdown.
1099  *
1100  * @param fi    An instance of an interface statemachine.
1101  * @param event The event, just happened.
1102  * @param arg   Generic pointer, casted from struct net_device * upon call.
1103  */
1104 static void
1105 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1106 {
1107 	IUCV_DBF_TEXT(trace, 3, __func__);
1108 
1109 	switch (fsm_getstate(fi)) {
1110 		case DEV_STATE_RUNNING:
1111 			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1112 			break;
1113 		case DEV_STATE_STOPWAIT:
1114 			fsm_newstate(fi, DEV_STATE_STOPPED);
1115 			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1116 			break;
1117 	}
1118 }
1119 
/*
 * Jump table of the interface statemachine: maps (state, event) pairs
 * to the dev_action_* routines above.
 */
static const fsm_node dev_fsm[] = {
	/* A stopped device can only be started. */
	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
	/* A CONUP while already running is ignored. */
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
};
1133 
1134 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1135 
1136 /**
1137  * Transmit a packet.
1138  * This is a helper function for netiucv_tx().
1139  *
1140  * @param conn Connection to be used for sending.
1141  * @param skb Pointer to struct sk_buff of packet to send.
1142  *            The linklevel header has already been set up
1143  *            by netiucv_tx().
1144  *
1145  * @return 0 on success, -ERRNO on failure. (Never fails.)
1146  */
static int netiucv_transmit_skb(struct iucv_connection *conn,
				struct sk_buff *skb)
{
	struct iucv_message msg;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
		/* A send is already in flight: park the skb on the collect
		 * queue (bounded by max_buffsize) instead of sending now. */
		int l = skb->len + NETIUCV_HDRLEN;

		spin_lock_irqsave(&conn->collect_lock, saveflags);
		if (conn->collect_len + l >
		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
			rc = -EBUSY;
			IUCV_DBF_TEXT(data, 2,
				      "EBUSY from netiucv_transmit_skb\n");
		} else {
			/* Extra reference: the skb stays alive on the
			 * collect queue until it is actually sent. */
			refcount_inc(&skb->users);
			skb_queue_tail(&conn->collect_queue, skb);
			conn->collect_len += l;
			rc = 0;
		}
		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	} else {
		struct sk_buff *nskb = skb;
		/*
		 * Copy the skb to a new allocated skb in lowmem only if the
		 * data is located above 2G in memory or tailroom is < 2.
		 */
		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
				    NETIUCV_HDRLEN)) >> 31;
		int copied = 0;
		if (hi || (skb_tailroom(skb) < 2)) {
			/* GFP_DMA keeps the copy below 2G for IUCV. */
			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
			if (!nskb) {
				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
				rc = -ENOMEM;
				return rc;
			} else {
				skb_reserve(nskb, NETIUCV_HDRLEN);
				skb_put_data(nskb, skb->data, skb->len);
			}
			copied = 1;
		}
		/*
		 * skb now is below 2G and has enough room. Add headers.
		 */
		header.next = nskb->len + NETIUCV_HDRLEN;
		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
		/* Terminating header (next == 0) appended at the tail. */
		header.next = 0;
		skb_put_data(nskb, &header, NETIUCV_HDRLEN);

		fsm_newstate(conn->fsm, CONN_STATE_TX);
		conn->prof.send_stamp = jiffies;

		msg.tag = 1;
		msg.class = 0;
		rc = iucv_message_send(conn->path, &msg, 0, 0,
				       nskb->data, nskb->len);
		conn->prof.doios_single++;
		conn->prof.txlen += skb->len;
		conn->prof.tx_pending++;
		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
			conn->prof.tx_max_pending = conn->prof.tx_pending;
		if (rc) {
			struct netiucv_priv *privptr;
			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
			conn->prof.tx_pending--;
			privptr = netdev_priv(conn->netdev);
			if (privptr)
				privptr->stats.tx_errors++;
			if (copied)
				dev_kfree_skb(nskb);
			else {
				/*
				 * Remove our headers. They get added
				 * again on retransmit.
				 */
				skb_pull(skb, NETIUCV_HDRLEN);
				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
			}
			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
		} else {
			if (copied)
				dev_kfree_skb(skb);
			/* Hold a reference until TXDONE confirms the send;
			 * released when the commit queue is purged. */
			refcount_inc(&nskb->users);
			skb_queue_tail(&conn->commit_queue, nskb);
		}
	}

	return rc;
}
1241 
1242 /*
1243  * Interface API for upper network layers
1244  */
1245 
1246 /**
1247  * Open an interface.
1248  * Called from generic network layer when ifconfig up is run.
1249  *
1250  * @param dev Pointer to interface struct.
1251  *
1252  * @return 0 on success, -ERRNO on failure. (Never fails.)
1253  */
1254 static int netiucv_open(struct net_device *dev)
1255 {
1256 	struct netiucv_priv *priv = netdev_priv(dev);
1257 
1258 	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1259 	return 0;
1260 }
1261 
1262 /**
1263  * Close an interface.
1264  * Called from generic network layer when ifconfig down is run.
1265  *
1266  * @param dev Pointer to interface struct.
1267  *
1268  * @return 0 on success, -ERRNO on failure. (Never fails.)
1269  */
1270 static int netiucv_close(struct net_device *dev)
1271 {
1272 	struct netiucv_priv *priv = netdev_priv(dev);
1273 
1274 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1275 	return 0;
1276 }
1277 
/* PM prepare callback: nothing to prepare, only traced for debugging. */
static int netiucv_pm_prepare(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return 0;
}
1283 
1284 static void netiucv_pm_complete(struct device *dev)
1285 {
1286 	IUCV_DBF_TEXT(trace, 3, __func__);
1287 	return;
1288 }
1289 
1290 /**
1291  * netiucv_pm_freeze() - Freeze PM callback
1292  * @dev:	netiucv device
1293  *
1294  * close open netiucv interfaces
1295  */
1296 static int netiucv_pm_freeze(struct device *dev)
1297 {
1298 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1299 	struct net_device *ndev = NULL;
1300 	int rc = 0;
1301 
1302 	IUCV_DBF_TEXT(trace, 3, __func__);
1303 	if (priv && priv->conn)
1304 		ndev = priv->conn->netdev;
1305 	if (!ndev)
1306 		goto out;
1307 	netif_device_detach(ndev);
1308 	priv->pm_state = fsm_getstate(priv->fsm);
1309 	rc = netiucv_close(ndev);
1310 out:
1311 	return rc;
1312 }
1313 
1314 /**
1315  * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1316  * @dev:	netiucv device
1317  *
1318  * re-open netiucv interfaces closed during freeze
1319  */
1320 static int netiucv_pm_restore_thaw(struct device *dev)
1321 {
1322 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1323 	struct net_device *ndev = NULL;
1324 	int rc = 0;
1325 
1326 	IUCV_DBF_TEXT(trace, 3, __func__);
1327 	if (priv && priv->conn)
1328 		ndev = priv->conn->netdev;
1329 	if (!ndev)
1330 		goto out;
1331 	switch (priv->pm_state) {
1332 	case DEV_STATE_RUNNING:
1333 	case DEV_STATE_STARTWAIT:
1334 		rc = netiucv_open(ndev);
1335 		break;
1336 	default:
1337 		break;
1338 	}
1339 	netif_device_attach(ndev);
1340 out:
1341 	return rc;
1342 }
1343 
1344 /**
1345  * Start transmission of a packet.
1346  * Called from generic network device layer.
1347  *
1348  * @param skb Pointer to buffer containing the packet.
1349  * @param dev Pointer to interface struct.
1350  *
1351  * @return 0 if packet consumed, !0 if packet rejected.
1352  *         Note: If we return !0, then the packet is free'd by
1353  *               the generic network layer.
1354  */
1355 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1356 {
1357 	struct netiucv_priv *privptr = netdev_priv(dev);
1358 	int rc;
1359 
1360 	IUCV_DBF_TEXT(trace, 4, __func__);
1361 	/**
1362 	 * Some sanity checks ...
1363 	 */
1364 	if (skb == NULL) {
1365 		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1366 		privptr->stats.tx_dropped++;
1367 		return NETDEV_TX_OK;
1368 	}
1369 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1370 		IUCV_DBF_TEXT(data, 2,
1371 			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1372 		dev_kfree_skb(skb);
1373 		privptr->stats.tx_dropped++;
1374 		return NETDEV_TX_OK;
1375 	}
1376 
1377 	/**
1378 	 * If connection is not running, try to restart it
1379 	 * and throw away packet.
1380 	 */
1381 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1382 		dev_kfree_skb(skb);
1383 		privptr->stats.tx_dropped++;
1384 		privptr->stats.tx_errors++;
1385 		privptr->stats.tx_carrier_errors++;
1386 		return NETDEV_TX_OK;
1387 	}
1388 
1389 	if (netiucv_test_and_set_busy(dev)) {
1390 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1391 		return NETDEV_TX_BUSY;
1392 	}
1393 	netif_trans_update(dev);
1394 	rc = netiucv_transmit_skb(privptr->conn, skb);
1395 	netiucv_clear_busy(dev);
1396 	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1397 }
1398 
1399 /**
1400  * netiucv_stats
1401  * @dev: Pointer to interface struct.
1402  *
1403  * Returns interface statistics of a device.
1404  *
1405  * Returns pointer to stats struct of this interface.
1406  */
1407 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1408 {
1409 	struct netiucv_priv *priv = netdev_priv(dev);
1410 
1411 	IUCV_DBF_TEXT(trace, 5, __func__);
1412 	return &priv->stats;
1413 }
1414 
1415 /*
1416  * attributes in sysfs
1417  */
1418 
/* Sysfs show for "user": print the peer user id (and userdata). */
static ssize_t user_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
1427 
/*
 * Parse "username[.userdata]" from a sysfs buffer into an 8-character,
 * blank-padded username and a 16-character userdata string (converted
 * to EBCDIC).  Returns 0 on success or -EINVAL on malformed input.
 */
static int netiucv_check_user(const char *buf, size_t count, char *username,
			      char *userdata)
{
	const char *p;
	int i;

	/* Length limits: at most 8 username chars and 16 userdata chars,
	 * plus separator and a possible trailing newline. */
	p = strchr(buf, '.');
	if ((p && ((count > 26) ||
		   ((p - buf) > 8) ||
		   (buf + count - p > 18))) ||
	    (!p && (count > 9))) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	/* Uppercase the username; only alphanumerics and '$' allowed. */
	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %02x\n", *p);
		return -EINVAL;
	}
	/* Blank-pad the username to exactly 8 characters. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (*p == '.') {
		p++;
		for (i = 0; i < 16 && *p; i++, p++) {
			if (*p == '\n')
				break;
			userdata[i] = toupper(*p);
		}
		/* NOTE(review): if the '.' is immediately followed by a
		 * newline (i == 0), the "i > 0" guard skips padding and
		 * userdata stays uninitialized - verify whether such
		 * input can reach this point and whether padding should
		 * apply in that case too. */
		while (i > 0 && i < 16)
			userdata[i++] = ' ';
	} else
		memcpy(userdata, iucvMagic_ascii, 16);
	userdata[16] = '\0';
	ASCEBC(userdata, 16);

	return 0;
}
1475 
/*
 * Sysfs store for "user": change the peer user id (and optional
 * userdata) of this connection.  Rejected while the interface is
 * active or when another interface already uses the same peer.
 */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char	username[9];
	char	userdata[17];
	int	rc;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	/* Refuse a peer another netiucv interface is already using. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	memcpy(priv->conn->userdata, userdata, 17);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
1514 
/* Sysfs show for "buffer": print the connection's max buffer size. */
static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
1523 
/*
 * Sysfs store for "buffer": set the connection's maximum buffer size
 * after validating it against driver limits and the current MTU.
 */
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	unsigned int bs1;
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count >= 39)
		return -EINVAL;

	rc = kstrtouint(buf, 0, &bs1);

	if (rc == -EINVAL) {
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
			buf);
		return -EINVAL;
	}
	/* Upper bound: hard driver maximum. */
	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	/* While running, the buffer must still hold an MTU-sized frame
	 * plus header. */
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	/* Lower bound: minimum IP MTU (576) plus two headers. */
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	/* On an inactive interface, derive the MTU from the new size. */
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;

}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1572 
/* Read-only sysfs view of the interface statemachine state. */
static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}

static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);

/* Read-only sysfs view of the connection statemachine state. */
static ssize_t conn_fsm_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}

static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1594 
/*
 * Transfer-profiling counters in the "stats" sysfs group.  Each
 * counter has a show handler printing its value and a write handler
 * that resets it to zero (the written value is ignored).
 */

/* Show prof.maxmulti. */
static ssize_t maxmulti_show (struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}

/* Reset prof.maxmulti. */
static ssize_t maxmulti_write (struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxmulti = 0;
	return count;
}

static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);

/* Show prof.maxcqueue. */
static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}

/* Reset prof.maxcqueue. */
static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.maxcqueue = 0;
	return count;
}

static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);

/* Show prof.doios_single (incremented per send in netiucv_transmit_skb). */
static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}

/* Reset prof.doios_single. */
static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.doios_single = 0;
	return count;
}

static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);

/* Show prof.doios_multi. */
static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}
1667 
1668 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1669 			    const char *buf, size_t count)
1670 {
1671 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1672 
1673 	IUCV_DBF_TEXT(trace, 5, __func__);
1674 	priv->conn->prof.doios_multi = 0;
1675 	return count;
1676 }
1677 
static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

/* Show prof.txlen (bytes handed to iucv_message_send). */
static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

/* Reset prof.txlen; the written value is ignored. */
static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

/* Show prof.tx_time. */
static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

/* Reset prof.tx_time; the written value is ignored. */
static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

/* Show prof.tx_pending (sends issued but not yet confirmed). */
static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

/* Reset prof.tx_pending; the written value is ignored. */
static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

/* Show prof.tx_max_pending (high-water mark of tx_pending). */
static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

/* Reset prof.tx_max_pending; the written value is ignored. */
static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1763 
/* Attributes placed directly in the device's sysfs directory. */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

/* Profiling attributes, published in the "stats" subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name  = "stats",
	.attrs = netiucv_stat_attrs,
};

/* All groups registered for a netiucv device (see dev->groups below). */
static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
1798 
1799 static int netiucv_register_device(struct net_device *ndev)
1800 {
1801 	struct netiucv_priv *priv = netdev_priv(ndev);
1802 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1803 	int ret;
1804 
1805 	IUCV_DBF_TEXT(trace, 3, __func__);
1806 
1807 	if (dev) {
1808 		dev_set_name(dev, "net%s", ndev->name);
1809 		dev->bus = &iucv_bus;
1810 		dev->parent = iucv_root;
1811 		dev->groups = netiucv_attr_groups;
1812 		/*
1813 		 * The release function could be called after the
1814 		 * module has been unloaded. It's _only_ task is to
1815 		 * free the struct. Therefore, we specify kfree()
1816 		 * directly here. (Probably a little bit obfuscating
1817 		 * but legitime ...).
1818 		 */
1819 		dev->release = (void (*)(struct device *))kfree;
1820 		dev->driver = &netiucv_driver;
1821 	} else
1822 		return -ENOMEM;
1823 
1824 	ret = device_register(dev);
1825 	if (ret) {
1826 		put_device(dev);
1827 		return ret;
1828 	}
1829 	priv->dev = dev;
1830 	dev_set_drvdata(dev, priv);
1831 	return 0;
1832 }
1833 
/* Unregister the sysfs device; the struct is freed via dev->release. */
static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	device_unregister(dev);
}
1839 
1840 /**
1841  * Allocate and initialize a new connection structure.
1842  * Add it to the list of netiucv connections;
1843  */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username,
						      char *userdata)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	/* Receive/transmit staging buffers in DMA-capable memory. */
	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	/* Per-connection statemachine. */
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (userdata)
		memcpy(conn->userdata, userdata, 17);
	/* Only with a peer user id does the FSM leave INVALID for
	 * STOPPED. */
	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}
1896 
1897 /**
1898  * Release a connection structure and remove it from the
1899  * list of netiucv connections.
1900  */
static void netiucv_remove_connection(struct iucv_connection *conn)
{

	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	/* Sever and free an established IUCV path, if any. */
	if (conn->path) {
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}
1920 
1921 /**
1922  * Release everything of a net device.
1923  */
1924 static void netiucv_free_netdevice(struct net_device *dev)
1925 {
1926 	struct netiucv_priv *privptr = netdev_priv(dev);
1927 
1928 	IUCV_DBF_TEXT(trace, 3, __func__);
1929 
1930 	if (!dev)
1931 		return;
1932 
1933 	if (privptr) {
1934 		if (privptr->conn)
1935 			netiucv_remove_connection(privptr->conn);
1936 		if (privptr->fsm)
1937 			kfree_fsm(privptr->fsm);
1938 		privptr->conn = NULL; privptr->fsm = NULL;
1939 		/* privptr gets freed by free_netdev() */
1940 	}
1941 }
1942 
1943 /**
1944  * Initialize a net device. (Called from kernel in alloc_netdev())
1945  */
/* net_device callbacks for a netiucv interface. */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
};

/* Setup callback for alloc_netdev(): fill in netdev defaults. */
static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu	         = NETIUCV_MTU_DEFAULT;
	dev->min_mtu		 = 576;
	dev->max_mtu		 = NETIUCV_MTU_MAX;
	/* core calls free_netdev() and our priv_destructor on teardown */
	dev->needs_free_netdev   = true;
	dev->priv_destructor     = netiucv_free_netdevice;
	dev->hard_header_len     = NETIUCV_HDRLEN;
	dev->addr_len            = 0;
	dev->type                = ARPHRD_SLIP;
	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops		 = &netiucv_netdev_ops;
}
1967 
1968 /**
1969  * Allocate and initialize everything of a net device.
1970  */
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	/*
	 * The RTNL lock is taken here and, on success, deliberately kept
	 * held: the caller runs register_netdevice() under it and then
	 * calls rtnl_unlock().  The error paths below release it.
	 */
	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	/* Interface statemachine. */
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username, userdata);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	rtnl_unlock();
	free_netdev(dev);
	return NULL;
}
2006 
/*
 * Driver attribute "connection": create a new netiucv interface for
 * the peer given as "username[.userdata]" in @buf.
 */
static ssize_t connection_store(struct device_driver *drv, const char *buf,
				size_t count)
{
	char username[9];
	char userdata[17];
	int rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	/* Refuse a duplicate connection to the same peer. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	/* On success this returns with the RTNL lock still held. */
	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		rtnl_unlock();
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	rtnl_unlock();
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
			    "successfully\n",
		netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	/* NOTE(review): this releases connection/FSM resources only;
	 * free_netdev() is not called on this path - verify whether the
	 * net_device itself leaks here. */
	netiucv_free_netdevice(dev);
	return rc;
}
static DRIVER_ATTR_WO(connection);
2070 
2071 static ssize_t remove_store(struct device_driver *drv, const char *buf,
2072 			    size_t count)
2073 {
2074 	struct iucv_connection *cp;
2075         struct net_device *ndev;
2076         struct netiucv_priv *priv;
2077         struct device *dev;
2078         char name[IFNAMSIZ];
2079 	const char *p;
2080         int i;
2081 
2082 	IUCV_DBF_TEXT(trace, 3, __func__);
2083 
2084         if (count >= IFNAMSIZ)
2085                 count = IFNAMSIZ - 1;
2086 
2087 	for (i = 0, p = buf; i < count && *p; i++, p++) {
2088 		if (*p == '\n' || *p == ' ')
2089                         /* trailing lf, grr */
2090                         break;
2091 		name[i] = *p;
2092         }
2093         name[i] = '\0';
2094 
2095 	read_lock_bh(&iucv_connection_rwlock);
2096 	list_for_each_entry(cp, &iucv_connection_list, list) {
2097 		ndev = cp->netdev;
2098 		priv = netdev_priv(ndev);
2099                 dev = priv->dev;
2100 		if (strncmp(name, ndev->name, count))
2101 			continue;
2102 		read_unlock_bh(&iucv_connection_rwlock);
2103                 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2104 			dev_warn(dev, "The IUCV device is connected"
2105 				" to %s and cannot be removed\n",
2106 				priv->conn->userid);
2107 			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2108 			return -EPERM;
2109                 }
2110                 unregister_netdev(ndev);
2111                 netiucv_unregister_device(dev);
2112                 return count;
2113         }
2114 	read_unlock_bh(&iucv_connection_rwlock);
2115 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2116         return -EINVAL;
2117 }
/* Write-only driver attribute /sys/bus/iucv/drivers/netiucv/remove */
static DRIVER_ATTR_WO(remove);
2119 
/* sysfs attributes exposed by the netiucv driver itself. */
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

/* Group wrapping the attributes above. */
static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

/* NULL-terminated group list assigned to netiucv_driver.groups in init. */
static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
2134 
/* Print the one-line "driver loaded" banner after successful init. */
static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}
2139 
2140 static void __exit netiucv_exit(void)
2141 {
2142 	struct iucv_connection *cp;
2143 	struct net_device *ndev;
2144 	struct netiucv_priv *priv;
2145 	struct device *dev;
2146 
2147 	IUCV_DBF_TEXT(trace, 3, __func__);
2148 	while (!list_empty(&iucv_connection_list)) {
2149 		cp = list_entry(iucv_connection_list.next,
2150 				struct iucv_connection, list);
2151 		ndev = cp->netdev;
2152 		priv = netdev_priv(ndev);
2153 		dev = priv->dev;
2154 
2155 		unregister_netdev(ndev);
2156 		netiucv_unregister_device(dev);
2157 	}
2158 
2159 	device_unregister(netiucv_dev);
2160 	driver_unregister(&netiucv_driver);
2161 	iucv_unregister(&netiucv_handler, 1);
2162 	iucv_unregister_dbf_views();
2163 
2164 	pr_info("driver unloaded\n");
2165 	return;
2166 }
2167 
2168 static int __init netiucv_init(void)
2169 {
2170 	int rc;
2171 
2172 	rc = iucv_register_dbf_views();
2173 	if (rc)
2174 		goto out;
2175 	rc = iucv_register(&netiucv_handler, 1);
2176 	if (rc)
2177 		goto out_dbf;
2178 	IUCV_DBF_TEXT(trace, 3, __func__);
2179 	netiucv_driver.groups = netiucv_drv_attr_groups;
2180 	rc = driver_register(&netiucv_driver);
2181 	if (rc) {
2182 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2183 		goto out_iucv;
2184 	}
2185 	/* establish dummy device */
2186 	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2187 	if (!netiucv_dev) {
2188 		rc = -ENOMEM;
2189 		goto out_driver;
2190 	}
2191 	dev_set_name(netiucv_dev, "netiucv");
2192 	netiucv_dev->bus = &iucv_bus;
2193 	netiucv_dev->parent = iucv_root;
2194 	netiucv_dev->release = (void (*)(struct device *))kfree;
2195 	netiucv_dev->driver = &netiucv_driver;
2196 	rc = device_register(netiucv_dev);
2197 	if (rc) {
2198 		put_device(netiucv_dev);
2199 		goto out_driver;
2200 	}
2201 	netiucv_banner();
2202 	return rc;
2203 
2204 out_driver:
2205 	driver_unregister(&netiucv_driver);
2206 out_iucv:
2207 	iucv_unregister(&netiucv_handler, 1);
2208 out_dbf:
2209 	iucv_unregister_dbf_views();
2210 out:
2211 	return rc;
2212 }
2213 
/* Module entry/exit hooks and license declaration. */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");
2217