/* xref: /linux/drivers/s390/net/netiucv.c (revision f24e9f586b377749dff37554696cf3a105540c94) */
1 /*
2  * IUCV network driver
3  *
4  * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5  * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6  *
7  * Sysfs integration and all bugs therein by Cornelia Huck
8  * (cornelia.huck@de.ibm.com)
9  *
10  * Documentation used:
11  *  the source of the original IUCV driver by:
12  *    Stefan Hegewald <hegewald@de.ibm.com>
13  *    Hartmut Penner <hpenner@de.ibm.com>
14  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
16  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2, or (at your option)
21  * any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31  *
32  */
33 
34 #undef DEBUG
35 
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/timer.h>
44 #include <linux/sched.h>
45 #include <linux/bitops.h>
46 
47 #include <linux/signal.h>
48 #include <linux/string.h>
49 #include <linux/device.h>
50 
51 #include <linux/ip.h>
52 #include <linux/if_arp.h>
53 #include <linux/tcp.h>
54 #include <linux/skbuff.h>
55 #include <linux/ctype.h>
56 #include <net/dst.h>
57 
58 #include <asm/io.h>
59 #include <asm/uaccess.h>
60 
61 #include "iucv.h"
62 #include "fsm.h"
63 
64 MODULE_AUTHOR
65     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
66 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
67 
68 
69 #define PRINTK_HEADER " iucv: "       /* for debugging */
70 
71 static struct device_driver netiucv_driver = {
72 	.name = "netiucv",
73 	.bus  = &iucv_bus,
74 };
75 
76 /**
77  * Per connection profiling data
78  */
79 struct connection_profile {
80 	unsigned long maxmulti;
81 	unsigned long maxcqueue;
82 	unsigned long doios_single;
83 	unsigned long doios_multi;
84 	unsigned long txlen;
85 	unsigned long tx_time;
86 	struct timespec send_stamp;
87 	unsigned long tx_pending;
88 	unsigned long tx_max_pending;
89 };
90 
91 /**
92  * Representation of one iucv connection
93  */
94 struct iucv_connection {
95 	struct iucv_connection    *next;
96 	iucv_handle_t             handle;
97 	__u16                     pathid;
98 	struct sk_buff            *rx_buff;
99 	struct sk_buff            *tx_buff;
100 	struct sk_buff_head       collect_queue;
101 	struct sk_buff_head	  commit_queue;
102 	spinlock_t                collect_lock;
103 	int                       collect_len;
104 	int                       max_buffsize;
105 	fsm_timer                 timer;
106 	fsm_instance              *fsm;
107 	struct net_device         *netdev;
108 	struct connection_profile prof;
109 	char                      userid[9];
110 };
111 
112 /**
113  * Linked list of all connection structs.
114  */
115 struct iucv_connection_struct {
116 	struct iucv_connection *iucv_connections;
117 	rwlock_t iucv_rwlock;
118 };
119 
120 static struct iucv_connection_struct iucv_conns;
121 
122 /**
123  * Representation of event-data for the
124  * connection state machine.
125  */
126 struct iucv_event {
127 	struct iucv_connection *conn;
128 	void                   *data;
129 };
130 
131 /**
132  * Private part of the network device structure
133  */
134 struct netiucv_priv {
135 	struct net_device_stats stats;
136 	unsigned long           tbusy;
137 	fsm_instance            *fsm;
138         struct iucv_connection  *conn;
139 	struct device           *dev;
140 };
141 
142 /**
143  * Link level header for a packet.
144  */
145 typedef struct ll_header_t {
146 	__u16 next;
147 } ll_header;
148 
149 #define NETIUCV_HDRLEN           (sizeof(ll_header))
150 #define NETIUCV_BUFSIZE_MAX      32768
151 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
152 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
153 #define NETIUCV_MTU_DEFAULT      9216
154 #define NETIUCV_QUEUELEN_DEFAULT 50
155 #define NETIUCV_TIMEOUT_5SEC     5000
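
/**
 * On-the-wire framing (derived from netiucv_transmit_skb(),
 * conn_action_txdone() and netiucv_unpack_skb()): each IUCV message
 * buffer carries one or more packets, every packet preceded by an
 * ll_header whose "next" field holds the offset of the following header,
 * counted from the start of the buffer; a trailing header with
 * next == 0 terminates the buffer.
 *
 * Illustrative example (not taken from a trace): two packets of 100 and
 * 60 bytes are packed as
 *
 *   offset   0: header, next = 102
 *   offset   2: 100 bytes payload
 *   offset 102: header, next = 164
 *   offset 104: 60 bytes payload
 *   offset 164: header, next = 0    (end of buffer, total length 166)
 */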
156 
157 /**
 * Compatibility helpers for busy handling
 * of network devices.
160  */
161 static __inline__ void netiucv_clear_busy(struct net_device *dev)
162 {
163 	clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy));
164 	netif_wake_queue(dev);
165 }
166 
167 static __inline__ int netiucv_test_and_set_busy(struct net_device *dev)
168 {
169 	netif_stop_queue(dev);
170 	return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy);
171 }
172 
173 static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
174 static __u8 iucvMagic[16] = {
175 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
176 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
177 };
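
/*
 * Note: 0xF0 is EBCDIC '0' and 0x40 is an EBCDIC blank, so iucvMagic is
 * a fixed 16-byte EBCDIC string (two 8-byte halves, each an EBCDIC '0'
 * padded with blanks). It is handed to iucv_register_program() and
 * iucv_connect() in conn_action_start().
 */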
178 
179 /**
 * This mask means that both the 16-byte IUCV "magic" and the 8-byte
 * origin userid must match exactly as specified in order for the
 * ConnectionPending callback (netiucv_callback_connreq) to get control.
183  */
184 static __u8 netiucv_mask[] = {
185 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
186 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
187 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
188 };
189 
190 /**
191  * Convert an iucv userId to its printable
192  * form (strip whitespace at end).
193  *
 * @param name An iucv userId
195  *
196  * @returns The printable string (static data!!)
197  */
198 static __inline__ char *
199 netiucv_printname(char *name)
200 {
201 	static char tmp[9];
202 	char *p = tmp;
203 	memcpy(tmp, name, 8);
204 	tmp[8] = '\0';
205 	while (*p && (!isspace(*p)))
206 		p++;
207 	*p = '\0';
208 	return tmp;
209 }
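
/*
 * Illustrative example: a blank-padded userid such as "TCPIP   " is
 * returned as "TCPIP". The result lives in a static buffer, so the
 * function is not reentrant; callers must consume the string before the
 * next call.
 */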
210 
211 /**
212  * States of the interface statemachine.
213  */
214 enum dev_states {
215 	DEV_STATE_STOPPED,
216 	DEV_STATE_STARTWAIT,
217 	DEV_STATE_STOPWAIT,
218 	DEV_STATE_RUNNING,
219 	/**
	 * MUST always be the last element!!
221 	 */
222 	NR_DEV_STATES
223 };
224 
225 static const char *dev_state_names[] = {
226 	"Stopped",
227 	"StartWait",
228 	"StopWait",
229 	"Running",
230 };
231 
232 /**
233  * Events of the interface statemachine.
234  */
235 enum dev_events {
236 	DEV_EVENT_START,
237 	DEV_EVENT_STOP,
238 	DEV_EVENT_CONUP,
239 	DEV_EVENT_CONDOWN,
240 	/**
	 * MUST always be the last element!!
242 	 */
243 	NR_DEV_EVENTS
244 };
245 
246 static const char *dev_event_names[] = {
247 	"Start",
248 	"Stop",
249 	"Connection up",
250 	"Connection down",
251 };
252 
253 /**
254  * Events of the connection statemachine
255  */
256 enum conn_events {
257 	/**
	 * Events representing callbacks from
	 * the lowlevel iucv layer
260 	 */
261 	CONN_EVENT_CONN_REQ,
262 	CONN_EVENT_CONN_ACK,
263 	CONN_EVENT_CONN_REJ,
264 	CONN_EVENT_CONN_SUS,
265 	CONN_EVENT_CONN_RES,
266 	CONN_EVENT_RX,
267 	CONN_EVENT_TXDONE,
268 
269 	/**
	 * Events representing error return codes from
	 * calls to the lowlevel iucv layer
272 	 */
273 
274 	/**
275 	 * Event, representing timer expiry.
276 	 */
277 	CONN_EVENT_TIMER,
278 
279 	/**
	 * Events representing commands from upper levels.
281 	 */
282 	CONN_EVENT_START,
283 	CONN_EVENT_STOP,
284 
285 	/**
	 * MUST always be the last element!!
287 	 */
288 	NR_CONN_EVENTS,
289 };
290 
291 static const char *conn_event_names[] = {
292 	"Remote connection request",
293 	"Remote connection acknowledge",
294 	"Remote connection reject",
295 	"Connection suspended",
296 	"Connection resumed",
297 	"Data received",
298 	"Data sent",
299 
300 	"Timer",
301 
302 	"Start",
303 	"Stop",
304 };
305 
306 /**
307  * States of the connection statemachine.
308  */
309 enum conn_states {
310 	/**
311 	 * Connection not assigned to any device,
312 	 * initial state, invalid
313 	 */
314 	CONN_STATE_INVALID,
315 
316 	/**
317 	 * Userid assigned but not operating
318 	 */
319 	CONN_STATE_STOPPED,
320 
321 	/**
322 	 * Connection registered,
323 	 * no connection request sent yet,
324 	 * no connection request received
325 	 */
326 	CONN_STATE_STARTWAIT,
327 
328 	/**
329 	 * Connection registered and connection request sent,
330 	 * no acknowledge and no connection request received yet.
331 	 */
332 	CONN_STATE_SETUPWAIT,
333 
334 	/**
335 	 * Connection up and running idle
336 	 */
337 	CONN_STATE_IDLE,
338 
339 	/**
340 	 * Data sent, awaiting CONN_EVENT_TXDONE
341 	 */
342 	CONN_STATE_TX,
343 
344 	/**
345 	 * Error during registration.
346 	 */
347 	CONN_STATE_REGERR,
348 
349 	/**
	 * Error during connection setup.
351 	 */
352 	CONN_STATE_CONNERR,
353 
354 	/**
	 * MUST always be the last element!!
356 	 */
357 	NR_CONN_STATES,
358 };
359 
360 static const char *conn_state_names[] = {
361 	"Invalid",
362 	"Stopped",
363 	"StartWait",
364 	"SetupWait",
365 	"Idle",
366 	"TX",
368 	"Registration error",
369 	"Connect error",
370 };
371 
372 
373 /**
374  * Debug Facility Stuff
375  */
376 static debug_info_t *iucv_dbf_setup = NULL;
377 static debug_info_t *iucv_dbf_data = NULL;
378 static debug_info_t *iucv_dbf_trace = NULL;
379 
380 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
381 
382 static void
383 iucv_unregister_dbf_views(void)
384 {
385 	if (iucv_dbf_setup)
386 		debug_unregister(iucv_dbf_setup);
387 	if (iucv_dbf_data)
388 		debug_unregister(iucv_dbf_data);
389 	if (iucv_dbf_trace)
390 		debug_unregister(iucv_dbf_trace);
391 }
392 static int
393 iucv_register_dbf_views(void)
394 {
395 	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
396 					IUCV_DBF_SETUP_PAGES,
397 					IUCV_DBF_SETUP_NR_AREAS,
398 					IUCV_DBF_SETUP_LEN);
399 	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
400 				       IUCV_DBF_DATA_PAGES,
401 				       IUCV_DBF_DATA_NR_AREAS,
402 				       IUCV_DBF_DATA_LEN);
403 	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
404 					IUCV_DBF_TRACE_PAGES,
405 					IUCV_DBF_TRACE_NR_AREAS,
406 					IUCV_DBF_TRACE_LEN);
407 
408 	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
409 	    (iucv_dbf_trace == NULL)) {
410 		iucv_unregister_dbf_views();
411 		return -ENOMEM;
412 	}
413 	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
414 	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
415 
416 	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
417 	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
418 
419 	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
420 	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
421 
422 	return 0;
423 }
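
/*
 * The three views above (setup, data, trace) use the s390 debug
 * facility; the IUCV_DBF_TEXT* macros used throughout this driver write
 * into these areas, filtered by the debug levels set here.
 */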
424 
425 /**
426  * Callback-wrappers, called from lowlevel iucv layer.
427  *****************************************************************************/
428 
429 static void
430 netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
431 {
432 	struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
433 	struct iucv_event ev;
434 
435 	ev.conn = conn;
436 	ev.data = (void *)eib;
437 
438 	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
439 }
440 
441 static void
442 netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
443 {
444 	struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
445 	struct iucv_event ev;
446 
447 	ev.conn = conn;
448 	ev.data = (void *)eib;
449 	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
450 }
451 
452 static void
453 netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
454 {
455 	struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
456 	struct iucv_event ev;
457 
458 	ev.conn = conn;
459 	ev.data = (void *)eib;
460 	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
461 }
462 
463 static void
464 netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
465 {
466 	struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
467 	struct iucv_event ev;
468 
469 	ev.conn = conn;
470 	ev.data = (void *)eib;
471 	fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
472 }
473 
474 static void
475 netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
476 {
477 	struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
478 	struct iucv_event ev;
479 
480 	ev.conn = conn;
481 	ev.data = (void *)eib;
482 	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
483 }
484 
485 static void
486 netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
487 {
488 	struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
489 	struct iucv_event ev;
490 
491 	ev.conn = conn;
492 	ev.data = (void *)eib;
493 	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
494 }
495 
496 static void
497 netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
498 {
499 	struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
500 	struct iucv_event ev;
501 
502 	ev.conn = conn;
503 	ev.data = (void *)eib;
504 	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
505 }
506 
507 static iucv_interrupt_ops_t netiucv_ops = {
508 	.ConnectionPending  = netiucv_callback_connreq,
509 	.ConnectionComplete = netiucv_callback_connack,
510 	.ConnectionSevered  = netiucv_callback_connrej,
511 	.ConnectionQuiesced = netiucv_callback_connsusp,
512 	.ConnectionResumed  = netiucv_callback_connres,
513 	.MessagePending     = netiucv_callback_rx,
514 	.MessageComplete    = netiucv_callback_txdone
515 };
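
/*
 * Each callback above simply wraps the low-level interrupt buffer in a
 * struct iucv_event and feeds the matching CONN_EVENT_* into the
 * statemachine of the connection passed back as pgm_data.
 */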
516 
517 /**
518  * Dummy NOP action for all statemachines
519  */
520 static void
521 fsm_action_nop(fsm_instance *fi, int event, void *arg)
522 {
523 }
524 
525 /**
526  * Actions of the connection statemachine
527  *****************************************************************************/
528 
529 /**
530  * Helper function for conn_action_rx()
531  * Unpack a just received skb and hand it over to
532  * upper layers.
533  *
534  * @param conn The connection where this skb has been received.
535  * @param pskb The received skb.
536  */
538 static void
539 netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
540 {
541 	struct net_device     *dev = conn->netdev;
542 	struct netiucv_priv   *privptr = dev->priv;
543 	__u16          offset = 0;
544 
545 	skb_put(pskb, NETIUCV_HDRLEN);
546 	pskb->dev = dev;
547 	pskb->ip_summed = CHECKSUM_NONE;
548 	pskb->protocol = ntohs(ETH_P_IP);
549 
550 	while (1) {
551 		struct sk_buff *skb;
552 		ll_header *header = (ll_header *)pskb->data;
553 
554 		if (!header->next)
555 			break;
556 
557 		skb_pull(pskb, NETIUCV_HDRLEN);
558 		header->next -= offset;
559 		offset += header->next;
560 		header->next -= NETIUCV_HDRLEN;
561 		if (skb_tailroom(pskb) < header->next) {
562 			PRINT_WARN("%s: Illegal next field in iucv header: "
563 			       "%d > %d\n",
564 			       dev->name, header->next, skb_tailroom(pskb));
565 			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
566 				header->next, skb_tailroom(pskb));
567 			return;
568 		}
569 		skb_put(pskb, header->next);
570 		pskb->mac.raw = pskb->data;
571 		skb = dev_alloc_skb(pskb->len);
572 		if (!skb) {
573 			PRINT_WARN("%s Out of memory in netiucv_unpack_skb\n",
574 			       dev->name);
575 			IUCV_DBF_TEXT(data, 2,
576 				"Out of memory in netiucv_unpack_skb\n");
577 			privptr->stats.rx_dropped++;
578 			return;
579 		}
580 		memcpy(skb_put(skb, pskb->len), pskb->data, pskb->len);
581 		skb->mac.raw = skb->data;
582 		skb->dev = pskb->dev;
583 		skb->protocol = pskb->protocol;
584 		pskb->ip_summed = CHECKSUM_UNNECESSARY;
585 		/*
586 		 * Since receiving is always initiated from a tasklet (in iucv.c),
587 		 * we must use netif_rx_ni() instead of netif_rx()
588 		 */
589 		netif_rx_ni(skb);
590 		dev->last_rx = jiffies;
591 		privptr->stats.rx_packets++;
592 		privptr->stats.rx_bytes += skb->len;
593 		skb_pull(pskb, header->next);
594 		skb_put(pskb, NETIUCV_HDRLEN);
595 	}
596 }
597 
598 static void
599 conn_action_rx(fsm_instance *fi, int event, void *arg)
600 {
601 	struct iucv_event *ev = (struct iucv_event *)arg;
602 	struct iucv_connection *conn = ev->conn;
603 	iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
604 	struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
605 
606 	__u32 msglen = eib->ln1msg2.ipbfln1f;
607 	int rc;
608 
609 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
610 
611 	if (!conn->netdev) {
612 		/* FRITZ: How to tell iucv LL to drop the msg? */
613 		PRINT_WARN("Received data for unlinked connection\n");
614 		IUCV_DBF_TEXT(data, 2,
615 			"Received data for unlinked connection\n");
616 		return;
617 	}
618 	if (msglen > conn->max_buffsize) {
619 		/* FRITZ: How to tell iucv LL to drop the msg? */
620 		privptr->stats.rx_dropped++;
621 		PRINT_WARN("msglen %d > max_buffsize %d\n",
622 			msglen, conn->max_buffsize);
623 		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
624 			msglen, conn->max_buffsize);
625 		return;
626 	}
627 	conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
628 	conn->rx_buff->len = 0;
629 	rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
630 			  conn->rx_buff->data, msglen, NULL, NULL, NULL);
631 	if (rc || msglen < 5) {
632 		privptr->stats.rx_errors++;
633 		PRINT_WARN("iucv_receive returned %08x\n", rc);
634 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
635 		return;
636 	}
637 	netiucv_unpack_skb(conn, conn->rx_buff);
638 }
639 
640 static void
641 conn_action_txdone(fsm_instance *fi, int event, void *arg)
642 {
643 	struct iucv_event *ev = (struct iucv_event *)arg;
644 	struct iucv_connection *conn = ev->conn;
645 	iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
646 	struct netiucv_priv *privptr = NULL;
647 			         /* Shut up, gcc! skb is always below 2G. */
648 	__u32 single_flag = eib->ipmsgtag;
649 	__u32 txbytes = 0;
650 	__u32 txpackets = 0;
651 	__u32 stat_maxcq = 0;
652 	struct sk_buff *skb;
653 	unsigned long saveflags;
654 	ll_header header;
655 
656 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
657 
658 	if (conn && conn->netdev && conn->netdev->priv)
659 		privptr = (struct netiucv_priv *)conn->netdev->priv;
660 	conn->prof.tx_pending--;
661 	if (single_flag) {
662 		if ((skb = skb_dequeue(&conn->commit_queue))) {
663 			atomic_dec(&skb->users);
664 			dev_kfree_skb_any(skb);
665 			if (privptr) {
666 				privptr->stats.tx_packets++;
667 				privptr->stats.tx_bytes +=
668 					(skb->len - NETIUCV_HDRLEN
669 					 	  - NETIUCV_HDRLEN);
670 			}
671 		}
672 	}
673 	conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head;
674 	conn->tx_buff->len = 0;
675 	spin_lock_irqsave(&conn->collect_lock, saveflags);
676 	while ((skb = skb_dequeue(&conn->collect_queue))) {
677 		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
678 		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
679 		       NETIUCV_HDRLEN);
680 		memcpy(skb_put(conn->tx_buff, skb->len), skb->data, skb->len);
681 		txbytes += skb->len;
682 		txpackets++;
683 		stat_maxcq++;
684 		atomic_dec(&skb->users);
685 		dev_kfree_skb_any(skb);
686 	}
687 	if (conn->collect_len > conn->prof.maxmulti)
688 		conn->prof.maxmulti = conn->collect_len;
689 	conn->collect_len = 0;
690 	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
691 	if (conn->tx_buff->len) {
692 		int rc;
693 
694 		header.next = 0;
695 		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
696 		       NETIUCV_HDRLEN);
697 
698 		conn->prof.send_stamp = xtime;
699 		rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
700 			       conn->tx_buff->data, conn->tx_buff->len);
701 		conn->prof.doios_multi++;
702 		conn->prof.txlen += conn->tx_buff->len;
703 		conn->prof.tx_pending++;
704 		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
705 			conn->prof.tx_max_pending = conn->prof.tx_pending;
706 		if (rc) {
707 			conn->prof.tx_pending--;
708 			fsm_newstate(fi, CONN_STATE_IDLE);
709 			if (privptr)
710 				privptr->stats.tx_errors += txpackets;
711 			PRINT_WARN("iucv_send returned %08x\n",	rc);
712 			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
713 		} else {
714 			if (privptr) {
715 				privptr->stats.tx_packets += txpackets;
716 				privptr->stats.tx_bytes += txbytes;
717 			}
718 			if (stat_maxcq > conn->prof.maxcqueue)
719 				conn->prof.maxcqueue = stat_maxcq;
720 		}
721 	} else
722 		fsm_newstate(fi, CONN_STATE_IDLE);
723 }
724 
725 static void
726 conn_action_connaccept(fsm_instance *fi, int event, void *arg)
727 {
728 	struct iucv_event *ev = (struct iucv_event *)arg;
729 	struct iucv_connection *conn = ev->conn;
730 	iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
731 	struct net_device *netdev = conn->netdev;
732 	struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
733 	int rc;
734 	__u16 msglimit;
735 	__u8 udata[16];
736 
737 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
738 
739 	rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
740 			 conn->handle, conn, NULL, &msglimit);
741 	if (rc) {
742 		PRINT_WARN("%s: IUCV accept failed with error %d\n",
743 		       netdev->name, rc);
744 		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
745 		return;
746 	}
747 	fsm_newstate(fi, CONN_STATE_IDLE);
748 	conn->pathid = eib->ippathid;
749 	netdev->tx_queue_len = msglimit;
750 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
751 }
752 
753 static void
754 conn_action_connreject(fsm_instance *fi, int event, void *arg)
755 {
756 	struct iucv_event *ev = (struct iucv_event *)arg;
757 	struct iucv_connection *conn = ev->conn;
758 	struct net_device *netdev = conn->netdev;
759 	iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
760 	__u8 udata[16];
761 
762 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
763 
764 	iucv_sever(eib->ippathid, udata);
765 	if (eib->ippathid != conn->pathid) {
766 		PRINT_INFO("%s: IR Connection Pending; "
767 			"pathid %d does not match original pathid %d\n",
768 			netdev->name, eib->ippathid, conn->pathid);
769 		IUCV_DBF_TEXT_(data, 2,
770 			"connreject: IR pathid %d, conn. pathid %d\n",
771 			eib->ippathid, conn->pathid);
772 		iucv_sever(conn->pathid, udata);
773 	}
774 }
775 
776 static void
777 conn_action_connack(fsm_instance *fi, int event, void *arg)
778 {
779 	struct iucv_event *ev = (struct iucv_event *)arg;
780 	struct iucv_connection *conn = ev->conn;
781 	iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
782 	struct net_device *netdev = conn->netdev;
783 	struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
784 
785 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
786 
787 	fsm_deltimer(&conn->timer);
788 	fsm_newstate(fi, CONN_STATE_IDLE);
789 	if (eib->ippathid != conn->pathid) {
790 		PRINT_INFO("%s: IR Connection Complete; "
791 			"pathid %d does not match original pathid %d\n",
792 			netdev->name, eib->ippathid, conn->pathid);
793 		IUCV_DBF_TEXT_(data, 2,
794 			"connack: IR pathid %d, conn. pathid %d\n",
795 			eib->ippathid, conn->pathid);
796 		conn->pathid = eib->ippathid;
797 	}
798 	netdev->tx_queue_len = eib->ipmsglim;
799 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
800 }
801 
802 static void
803 conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
804 {
805 	struct iucv_connection *conn = (struct iucv_connection *)arg;
806 	__u8 udata[16];
807 
808 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
809 
810 	fsm_deltimer(&conn->timer);
811 	iucv_sever(conn->pathid, udata);
812 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
813 }
814 
815 static void
816 conn_action_connsever(fsm_instance *fi, int event, void *arg)
817 {
818 	struct iucv_event *ev = (struct iucv_event *)arg;
819 	struct iucv_connection *conn = ev->conn;
820 	struct net_device *netdev = conn->netdev;
821 	struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
822 	__u8 udata[16];
823 
824 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
825 
826 	fsm_deltimer(&conn->timer);
827 	iucv_sever(conn->pathid, udata);
828 	PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
829 	IUCV_DBF_TEXT(data, 2,
830 		"conn_action_connsever: Remote dropped connection\n");
831 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
832 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
833 }
834 
835 static void
836 conn_action_start(fsm_instance *fi, int event, void *arg)
837 {
838 	struct iucv_event *ev = (struct iucv_event *)arg;
839 	struct iucv_connection *conn = ev->conn;
840 	__u16 msglimit;
841 	int rc;
842 
843 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
844 
845 	if (!conn->handle) {
846 		IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
847 		conn->handle =
848 			iucv_register_program(iucvMagic, conn->userid,
849 					      netiucv_mask,
850 					      &netiucv_ops, conn);
851 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
852 		if (!conn->handle) {
853 			fsm_newstate(fi, CONN_STATE_REGERR);
854 			conn->handle = NULL;
855 			IUCV_DBF_TEXT(setup, 2,
856 				"NULL from iucv_register_program\n");
857 			return;
858 		}
859 
860 		PRINT_DEBUG("%s('%s'): registered successfully\n",
861 			 conn->netdev->name, conn->userid);
862 	}
863 
864 	PRINT_DEBUG("%s('%s'): connecting ...\n",
865 		 conn->netdev->name, conn->userid);
866 
867 	/* We must set the state before calling iucv_connect because the callback
868 	 * handler could be called at any point after the connection request is
869 	 * sent */
870 
871 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
872 	rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
873 			  conn->userid, iucv_host, 0, NULL, &msglimit,
874 			  conn->handle, conn);
875 	switch (rc) {
876 		case 0:
877 			conn->netdev->tx_queue_len = msglimit;
878 			fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
879 				CONN_EVENT_TIMER, conn);
880 			return;
881 		case 11:
882 			PRINT_INFO("%s: User %s is currently not available.\n",
883 			       conn->netdev->name,
884 			       netiucv_printname(conn->userid));
885 			fsm_newstate(fi, CONN_STATE_STARTWAIT);
886 			return;
887 		case 12:
888 			PRINT_INFO("%s: User %s is currently not ready.\n",
889 			       conn->netdev->name,
890 			       netiucv_printname(conn->userid));
891 			fsm_newstate(fi, CONN_STATE_STARTWAIT);
892 			return;
893 		case 13:
894 			PRINT_WARN("%s: Too many IUCV connections.\n",
895 			       conn->netdev->name);
896 			fsm_newstate(fi, CONN_STATE_CONNERR);
897 			break;
898 		case 14:
899 			PRINT_WARN(
900 			       "%s: User %s has too many IUCV connections.\n",
901 			       conn->netdev->name,
902 			       netiucv_printname(conn->userid));
903 			fsm_newstate(fi, CONN_STATE_CONNERR);
904 			break;
905 		case 15:
906 			PRINT_WARN(
907 			       "%s: No IUCV authorization in CP directory.\n",
908 			       conn->netdev->name);
909 			fsm_newstate(fi, CONN_STATE_CONNERR);
910 			break;
911 		default:
912 			PRINT_WARN("%s: iucv_connect returned error %d\n",
913 			       conn->netdev->name, rc);
914 			fsm_newstate(fi, CONN_STATE_CONNERR);
915 			break;
916 	}
917 	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
918 	IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
919 	iucv_unregister_program(conn->handle);
920 	conn->handle = NULL;
921 }
922 
923 static void
924 netiucv_purge_skb_queue(struct sk_buff_head *q)
925 {
926 	struct sk_buff *skb;
927 
928 	while ((skb = skb_dequeue(q))) {
929 		atomic_dec(&skb->users);
930 		dev_kfree_skb_any(skb);
931 	}
932 }
933 
934 static void
935 conn_action_stop(fsm_instance *fi, int event, void *arg)
936 {
937 	struct iucv_event *ev = (struct iucv_event *)arg;
938 	struct iucv_connection *conn = ev->conn;
939 	struct net_device *netdev = conn->netdev;
940 	struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
941 
942 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
943 
944 	fsm_deltimer(&conn->timer);
945 	fsm_newstate(fi, CONN_STATE_STOPPED);
946 	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->handle) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
		iucv_unregister_program(conn->handle);
	}
950 	conn->handle = NULL;
951 	netiucv_purge_skb_queue(&conn->commit_queue);
952 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
953 }
954 
955 static void
956 conn_action_inval(fsm_instance *fi, int event, void *arg)
957 {
958 	struct iucv_event *ev = (struct iucv_event *)arg;
959 	struct iucv_connection *conn = ev->conn;
960 	struct net_device *netdev = conn->netdev;
961 
962 	PRINT_WARN("%s: Cannot connect without username\n",
963 	       netdev->name);
964 	IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
965 }
966 
967 static const fsm_node conn_fsm[] = {
968 	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
969 	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
970 
971 	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
972 	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
973 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
974 	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
975 	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
976 	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
977 	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
978 
979 	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
980         { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
981 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
982 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
983 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
984 
985 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
986 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
987 
988 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
989 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
990 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
991 
992 	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
993 	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
994 
995 	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
996 	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
997 };
998 
999 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1000 
1001 
1002 /**
 * Actions for the interface statemachine.
1004  *****************************************************************************/
1005 
1006 /**
 * Start up the connection by sending CONN_EVENT_START to it.
1008  *
1009  * @param fi    An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
1012  */
1013 static void
1014 dev_action_start(fsm_instance *fi, int event, void *arg)
1015 {
1016 	struct net_device   *dev = (struct net_device *)arg;
1017 	struct netiucv_priv *privptr = dev->priv;
1018 	struct iucv_event   ev;
1019 
1020 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1021 
1022 	ev.conn = privptr->conn;
1023 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1024 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
1025 }
1026 
1027 /**
 * Shut down the connection by sending CONN_EVENT_STOP to it.
1029  *
1030  * @param fi    An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
1033  */
1034 static void
1035 dev_action_stop(fsm_instance *fi, int event, void *arg)
1036 {
1037 	struct net_device   *dev = (struct net_device *)arg;
1038 	struct netiucv_priv *privptr = dev->priv;
1039 	struct iucv_event   ev;
1040 
1041 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1042 
1043 	ev.conn = privptr->conn;
1044 
1045 	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1046 	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1047 }
1048 
1049 /**
1050  * Called from connection statemachine
1051  * when a connection is up and running.
1052  *
1053  * @param fi    An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
1056  */
1057 static void
1058 dev_action_connup(fsm_instance *fi, int event, void *arg)
1059 {
1060 	struct net_device   *dev = (struct net_device *)arg;
1061 	struct netiucv_priv *privptr = dev->priv;
1062 
1063 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1064 
1065 	switch (fsm_getstate(fi)) {
1066 		case DEV_STATE_STARTWAIT:
1067 			fsm_newstate(fi, DEV_STATE_RUNNING);
1068 			PRINT_INFO("%s: connected with remote side %s\n",
1069 			       dev->name, privptr->conn->userid);
1070 			IUCV_DBF_TEXT(setup, 3,
1071 				"connection is up and running\n");
1072 			break;
1073 		case DEV_STATE_STOPWAIT:
1074 			PRINT_INFO(
1075 			       "%s: got connection UP event during shutdown!\n",
1076 			       dev->name);
1077 			IUCV_DBF_TEXT(data, 2,
1078 				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1079 			break;
1080 	}
1081 }
1082 
1083 /**
1084  * Called from connection statemachine
 * when a connection has been shut down.
1086  *
1087  * @param fi    An instance of an interface statemachine.
 * @param event The event that just happened.
 * @param arg   Generic pointer, cast from struct net_device * upon call.
1090  */
1091 static void
1092 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1093 {
1094 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1095 
1096 	switch (fsm_getstate(fi)) {
1097 		case DEV_STATE_RUNNING:
1098 			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1099 			break;
1100 		case DEV_STATE_STOPWAIT:
1101 			fsm_newstate(fi, DEV_STATE_STOPPED);
1102 			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1103 			break;
1104 	}
1105 }
1106 
1107 static const fsm_node dev_fsm[] = {
1108 	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1109 
1110 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1111 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1112 
1113 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1114 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1115 
1116 	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1117 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1118 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   fsm_action_nop      },
1119 };
1120 
1121 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1122 
1123 /**
1124  * Transmit a packet.
1125  * This is a helper function for netiucv_tx().
1126  *
1127  * @param conn Connection to be used for sending.
1128  * @param skb Pointer to struct sk_buff of packet to send.
 *            netiucv_tx() has already verified that there is enough
 *            headroom for the link-level header, which is added here.
 *
 * @return 0 on success, -ERRNO on failure.
1133  */
1134 static int
1135 netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
1136 	unsigned long saveflags;
1137 	ll_header header;
1138 	int       rc = 0;
1139 
1140 	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1141 		int l = skb->len + NETIUCV_HDRLEN;
1142 
1143 		spin_lock_irqsave(&conn->collect_lock, saveflags);
1144 		if (conn->collect_len + l >
1145 		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1146 			rc = -EBUSY;
1147 			IUCV_DBF_TEXT(data, 2,
1148 				"EBUSY from netiucv_transmit_skb\n");
1149 		} else {
1150 			atomic_inc(&skb->users);
1151 			skb_queue_tail(&conn->collect_queue, skb);
1152 			conn->collect_len += l;
1153 		}
1154 		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1155 	} else {
1156 		struct sk_buff *nskb = skb;
1157 		/**
		 * Copy the skb to a newly allocated skb in lowmem only if the
1159 		 * data is located above 2G in memory or tailroom is < 2.
1160 		 */
1161 		unsigned long hi =
1162 			((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31;
1163 		int copied = 0;
1164 		if (hi || (skb_tailroom(skb) < 2)) {
1165 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1166 					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1167 			if (!nskb) {
1168 				PRINT_WARN("%s: Could not allocate tx_skb\n",
1169 				       conn->netdev->name);
1170 				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1171 				rc = -ENOMEM;
1172 				return rc;
1173 			} else {
1174 				skb_reserve(nskb, NETIUCV_HDRLEN);
1175 				memcpy(skb_put(nskb, skb->len),
1176 				       skb->data, skb->len);
1177 			}
1178 			copied = 1;
1179 		}
1180 		/**
1181 		 * skb now is below 2G and has enough room. Add headers.
1182 		 */
1183 		header.next = nskb->len + NETIUCV_HDRLEN;
1184 		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1185 		header.next = 0;
1186 		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1187 
1188 		fsm_newstate(conn->fsm, CONN_STATE_TX);
1189 		conn->prof.send_stamp = xtime;
1190 
1191 		rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
1192 			0, nskb->data, nskb->len);
1193 			       /* Shut up, gcc! nskb is always below 2G. */
1194 		conn->prof.doios_single++;
1195 		conn->prof.txlen += skb->len;
1196 		conn->prof.tx_pending++;
1197 		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1198 			conn->prof.tx_max_pending = conn->prof.tx_pending;
1199 		if (rc) {
1200 			struct netiucv_priv *privptr;
1201 			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1202 			conn->prof.tx_pending--;
1203 			privptr = (struct netiucv_priv *)conn->netdev->priv;
1204 			if (privptr)
1205 				privptr->stats.tx_errors++;
1206 			if (copied)
1207 				dev_kfree_skb(nskb);
1208 			else {
1209 				/**
1210 				 * Remove our headers. They get added
1211 				 * again on retransmit.
1212 				 */
1213 				skb_pull(skb, NETIUCV_HDRLEN);
1214 				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1215 			}
1216 			PRINT_WARN("iucv_send returned %08x\n",	rc);
1217 			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1218 		} else {
1219 			if (copied)
1220 				dev_kfree_skb(skb);
1221 			atomic_inc(&nskb->users);
1222 			skb_queue_tail(&conn->commit_queue, nskb);
1223 		}
1224 	}
1225 
1226 	return rc;
1227 }
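
/*
 * Transmit path in short: while the connection FSM is not in
 * CONN_STATE_IDLE, packets are queued on collect_queue (bounded by
 * max_buffsize) and later flushed as a single multi-packet buffer from
 * conn_action_txdone(). In the idle case the packet is framed and sent
 * immediately via iucv_send() and parked on commit_queue until the
 * MessageComplete callback reports it as done.
 */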
1228 
1229 /**
1230  * Interface API for upper network layers
1231  *****************************************************************************/
1232 
1233 /**
1234  * Open an interface.
1235  * Called from generic network layer when ifconfig up is run.
1236  *
1237  * @param dev Pointer to interface struct.
1238  *
1239  * @return 0 on success, -ERRNO on failure. (Never fails.)
1240  */
1241 static int
1242 netiucv_open(struct net_device *dev) {
1243 	fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
1244 	return 0;
1245 }
1246 
1247 /**
1248  * Close an interface.
1249  * Called from generic network layer when ifconfig down is run.
1250  *
1251  * @param dev Pointer to interface struct.
1252  *
1253  * @return 0 on success, -ERRNO on failure. (Never fails.)
1254  */
1255 static int
1256 netiucv_close(struct net_device *dev) {
1257 	fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
1258 	return 0;
1259 }
1260 
1261 /**
1262  * Start transmission of a packet.
1263  * Called from generic network device layer.
1264  *
1265  * @param skb Pointer to buffer containing the packet.
1266  * @param dev Pointer to interface struct.
1267  *
1268  * @return 0 if packet consumed, !0 if packet rejected.
1269  *         Note: If we return !0, then the packet is free'd by
1270  *               the generic network layer.
1271  */
1272 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1273 {
1274 	int          rc = 0;
1275 	struct netiucv_priv *privptr = dev->priv;
1276 
1277 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1278 	/**
1279 	 * Some sanity checks ...
1280 	 */
1281 	if (skb == NULL) {
1282 		PRINT_WARN("%s: NULL sk_buff passed\n", dev->name);
1283 		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1284 		privptr->stats.tx_dropped++;
1285 		return 0;
1286 	}
1287 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1288 		PRINT_WARN("%s: Got sk_buff with head room < %ld bytes\n",
1289 		       dev->name, NETIUCV_HDRLEN);
1290 		IUCV_DBF_TEXT(data, 2,
1291 			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1292 		dev_kfree_skb(skb);
1293 		privptr->stats.tx_dropped++;
1294 		return 0;
1295 	}
1296 
1297 	/**
1298 	 * If connection is not running, try to restart it
1299 	 * and throw away packet.
1300 	 */
1301 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1302 		fsm_event(privptr->fsm, DEV_EVENT_START, dev);
1303 		dev_kfree_skb(skb);
1304 		privptr->stats.tx_dropped++;
1305 		privptr->stats.tx_errors++;
1306 		privptr->stats.tx_carrier_errors++;
1307 		return 0;
1308 	}
1309 
1310 	if (netiucv_test_and_set_busy(dev)) {
1311 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1312 		return -EBUSY;
1313 	}
1314 	dev->trans_start = jiffies;
1315 	if (netiucv_transmit_skb(privptr->conn, skb))
1316 		rc = 1;
1317 	netiucv_clear_busy(dev);
1318 	return rc;
1319 }
1320 
1321 /**
1322  * Returns interface statistics of a device.
1323  *
1324  * @param dev Pointer to interface struct.
1325  *
1326  * @return Pointer to stats struct of this interface.
1327  */
1328 static struct net_device_stats *
1329 netiucv_stats (struct net_device * dev)
1330 {
1331 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1332 	return &((struct netiucv_priv *)dev->priv)->stats;
1333 }
1334 
1335 /**
1336  * Sets MTU of an interface.
1337  *
1338  * @param dev     Pointer to interface struct.
1339  * @param new_mtu The new MTU to use for this interface.
1340  *
1341  * @return 0 on success, -EINVAL if MTU is out of valid range.
1342  *         (valid range is 576 .. NETIUCV_MTU_MAX).
1343  */
1344 static int
1345 netiucv_change_mtu (struct net_device * dev, int new_mtu)
1346 {
1347 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1348 	if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
1349 		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1350 		return -EINVAL;
1351 	}
1352 	dev->mtu = new_mtu;
1353 	return 0;
1354 }
1355 
1356 /**
1357  * attributes in sysfs
1358  *****************************************************************************/
1359 
1360 static ssize_t
1361 user_show (struct device *dev, struct device_attribute *attr, char *buf)
1362 {
1363 	struct netiucv_priv *priv = dev->driver_data;
1364 
1365 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1366 	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1367 }
1368 
1369 static ssize_t
1370 user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1371 {
1372 	struct netiucv_priv *priv = dev->driver_data;
1373 	struct net_device *ndev = priv->conn->netdev;
1374 	char    *p;
1375 	char    *tmp;
1376 	char 	username[9];
1377 	int 	i;
1378 	struct iucv_connection **clist = &iucv_conns.iucv_connections;
1379 	unsigned long flags;
1380 
1381 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1382 	if (count>9) {
1383 		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1384 		IUCV_DBF_TEXT_(setup, 2,
1385 			"%d is length of username\n", (int)count);
1386 		return -EINVAL;
1387 	}
1388 
1389 	tmp = strsep((char **) &buf, "\n");
1390 	for (i=0, p=tmp; i<8 && *p; i++, p++) {
1391 		if (isalnum(*p) || (*p == '$'))
1392 			username[i]= toupper(*p);
1393 		else if (*p == '\n') {
1394 			/* trailing lf, grr */
1395 			break;
1396 		} else {
1397 			PRINT_WARN("netiucv: Invalid char %c in username!\n",
1398 				*p);
1399 			IUCV_DBF_TEXT_(setup, 2,
1400 				"username: invalid character %c\n",
1401 				*p);
1402 			return -EINVAL;
1403 		}
1404 	}
1405 	while (i<8)
1406 		username[i++] = ' ';
1407 	username[8] = '\0';
1408 
1409 	if (memcmp(username, priv->conn->userid, 9)) {
1410 		/* username changed */
1411 		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
1412 			PRINT_WARN(
1413 				"netiucv: device %s active, connected to %s\n",
1414 				dev->bus_id, priv->conn->userid);
1415 			PRINT_WARN("netiucv: user cannot be updated\n");
1416 			IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1417 			return -EBUSY;
1418 		}
1419 	}
1420 	read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
1421 	while (*clist) {
1422                 if (!strncmp(username, (*clist)->userid, 9) ||
1423 		    ((*clist)->netdev != ndev))
1424                         break;
1425                 clist = &((*clist)->next);
1426         }
1427 	read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
1428         if (*clist) {
1429                 PRINT_WARN("netiucv: Connection to %s already exists\n",
1430                         username);
1431                 return -EEXIST;
1432         }
1433 	memcpy(priv->conn->userid, username, 9);
1434 
1435 	return count;
1436 
1437 }
1438 
1439 static DEVICE_ATTR(user, 0644, user_show, user_write);
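
/*
 * Illustrative usage (the sysfs path is an assumption based on the
 * "net%s" bus_id set up in netiucv_register_device(), e.g. netiucv0):
 *
 *   echo PEERUSER > /sys/bus/iucv/devices/netiucv0/user
 *
 * user_write() folds the name to upper case, accepts at most 8
 * alphanumeric characters (plus '$'), refuses changes while the
 * interface is up, and refuses userids for which a connection already
 * exists.
 */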
1440 
1441 static ssize_t
1442 buffer_show (struct device *dev, struct device_attribute *attr, char *buf)
1443 {
1444 	struct netiucv_priv *priv = dev->driver_data;
1445 
1446 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1447 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1448 }
1449 
1450 static ssize_t
1451 buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1452 {
1453 	struct netiucv_priv *priv = dev->driver_data;
1454 	struct net_device *ndev = priv->conn->netdev;
1455 	char         *e;
1456 	int          bs1;
1457 
1458 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1459 	if (count >= 39)
1460 		return -EINVAL;
1461 
1462 	bs1 = simple_strtoul(buf, &e, 0);
1463 
1464 	if (e && (!isspace(*e))) {
1465 		PRINT_WARN("netiucv: Invalid character in buffer!\n");
1466 		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1467 		return -EINVAL;
1468 	}
1469 	if (bs1 > NETIUCV_BUFSIZE_MAX) {
1470 		PRINT_WARN("netiucv: Given buffer size %d too large.\n",
1471 			bs1);
1472 		IUCV_DBF_TEXT_(setup, 2,
1473 			"buffer_write: buffer size %d too large\n",
1474 			bs1);
1475 		return -EINVAL;
1476 	}
1477 	if ((ndev->flags & IFF_RUNNING) &&
1478 	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1479 		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1480 			bs1);
1481 		IUCV_DBF_TEXT_(setup, 2,
1482 			"buffer_write: buffer size %d too small\n",
1483 			bs1);
1484 		return -EINVAL;
1485 	}
1486 	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1487 		PRINT_WARN("netiucv: Given buffer size %d too small.\n",
1488 			bs1);
1489 		IUCV_DBF_TEXT_(setup, 2,
1490 			"buffer_write: buffer size %d too small\n",
1491 			bs1);
1492 		return -EINVAL;
1493 	}
1494 
1495 	priv->conn->max_buffsize = bs1;
1496 	if (!(ndev->flags & IFF_RUNNING))
1497 		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1498 
1499 	return count;
1500 
1501 }
1502 
1503 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
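
/*
 * Illustrative usage (path assumed as above):
 *
 *   echo 16384 > /sys/bus/iucv/devices/netiucv0/buffer
 *
 * buffer_write() accepts values up to NETIUCV_BUFSIZE_MAX (32768) and no
 * smaller than 576 + 2 * NETIUCV_HDRLEN; while the interface is running
 * the value must also cover the current MTU plus the link-level headers.
 * If the interface is down, the MTU is adjusted to the new buffer size
 * minus the two link-level headers.
 */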
1504 
1505 static ssize_t
1506 dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
1507 {
1508 	struct netiucv_priv *priv = dev->driver_data;
1509 
1510 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1511 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1512 }
1513 
1514 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1515 
1516 static ssize_t
1517 conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
1518 {
1519 	struct netiucv_priv *priv = dev->driver_data;
1520 
1521 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1522 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1523 }
1524 
1525 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1526 
1527 static ssize_t
1528 maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf)
1529 {
1530 	struct netiucv_priv *priv = dev->driver_data;
1531 
1532 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1533 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1534 }
1535 
1536 static ssize_t
1537 maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1538 {
1539 	struct netiucv_priv *priv = dev->driver_data;
1540 
1541 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1542 	priv->conn->prof.maxmulti = 0;
1543 	return count;
1544 }
1545 
1546 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1547 
1548 static ssize_t
1549 maxcq_show (struct device *dev, struct device_attribute *attr, char *buf)
1550 {
1551 	struct netiucv_priv *priv = dev->driver_data;
1552 
1553 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1554 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1555 }
1556 
1557 static ssize_t
1558 maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1559 {
1560 	struct netiucv_priv *priv = dev->driver_data;
1561 
1562 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1563 	priv->conn->prof.maxcqueue = 0;
1564 	return count;
1565 }
1566 
1567 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1568 
1569 static ssize_t
1570 sdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
1571 {
1572 	struct netiucv_priv *priv = dev->driver_data;
1573 
1574 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1575 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1576 }
1577 
1578 static ssize_t
1579 sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1580 {
1581 	struct netiucv_priv *priv = dev->driver_data;
1582 
1583 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1584 	priv->conn->prof.doios_single = 0;
1585 	return count;
1586 }
1587 
1588 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1589 
1590 static ssize_t
1591 mdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
1592 {
1593 	struct netiucv_priv *priv = dev->driver_data;
1594 
1595 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1596 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1597 }
1598 
1599 static ssize_t
1600 mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1601 {
1602 	struct netiucv_priv *priv = dev->driver_data;
1603 
1604 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1605 	priv->conn->prof.doios_multi = 0;
1606 	return count;
1607 }
1608 
1609 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1610 
1611 static ssize_t
1612 txlen_show (struct device *dev, struct device_attribute *attr, char *buf)
1613 {
1614 	struct netiucv_priv *priv = dev->driver_data;
1615 
1616 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1617 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1618 }
1619 
1620 static ssize_t
1621 txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1622 {
1623 	struct netiucv_priv *priv = dev->driver_data;
1624 
1625 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1626 	priv->conn->prof.txlen = 0;
1627 	return count;
1628 }
1629 
1630 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1631 
1632 static ssize_t
1633 txtime_show (struct device *dev, struct device_attribute *attr, char *buf)
1634 {
1635 	struct netiucv_priv *priv = dev->driver_data;
1636 
1637 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1638 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1639 }
1640 
1641 static ssize_t
1642 txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1643 {
1644 	struct netiucv_priv *priv = dev->driver_data;
1645 
1646 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1647 	priv->conn->prof.tx_time = 0;
1648 	return count;
1649 }
1650 
1651 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1652 
1653 static ssize_t
1654 txpend_show (struct device *dev, struct device_attribute *attr, char *buf)
1655 {
1656 	struct netiucv_priv *priv = dev->driver_data;
1657 
1658 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1659 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1660 }
1661 
1662 static ssize_t
1663 txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1664 {
1665 	struct netiucv_priv *priv = dev->driver_data;
1666 
1667 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1668 	priv->conn->prof.tx_pending = 0;
1669 	return count;
1670 }
1671 
1672 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1673 
1674 static ssize_t
1675 txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf)
1676 {
1677 	struct netiucv_priv *priv = dev->driver_data;
1678 
1679 	IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
1680 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1681 }
1682 
1683 static ssize_t
1684 txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
1685 {
1686 	struct netiucv_priv *priv = dev->driver_data;
1687 
1688 	IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
1689 	priv->conn->prof.tx_max_pending = 0;
1690 	return count;
1691 }
1692 
1693 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1694 
1695 static struct attribute *netiucv_attrs[] = {
1696 	&dev_attr_buffer.attr,
1697 	&dev_attr_user.attr,
1698 	NULL,
1699 };
1700 
1701 static struct attribute_group netiucv_attr_group = {
1702 	.attrs = netiucv_attrs,
1703 };
1704 
1705 static struct attribute *netiucv_stat_attrs[] = {
1706 	&dev_attr_device_fsm_state.attr,
1707 	&dev_attr_connection_fsm_state.attr,
1708 	&dev_attr_max_tx_buffer_used.attr,
1709 	&dev_attr_max_chained_skbs.attr,
1710 	&dev_attr_tx_single_write_ops.attr,
1711 	&dev_attr_tx_multi_write_ops.attr,
1712 	&dev_attr_netto_bytes.attr,
1713 	&dev_attr_max_tx_io_time.attr,
1714 	&dev_attr_tx_pending.attr,
1715 	&dev_attr_tx_max_pending.attr,
1716 	NULL,
1717 };
1718 
1719 static struct attribute_group netiucv_stat_attr_group = {
1720 	.name  = "stats",
1721 	.attrs = netiucv_stat_attrs,
1722 };
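
/*
 * The statistics attributes above appear in a "stats" subdirectory of
 * the device's sysfs entry; reading returns the counter, and writing
 * any value resets it to zero (the *_write handlers ignore the input).
 * Example (path assumed as above):
 *
 *   cat /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 */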
1723 
1724 static inline int
1725 netiucv_add_files(struct device *dev)
1726 {
1727 	int ret;
1728 
1729 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1730 	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1731 	if (ret)
1732 		return ret;
1733 	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1734 	if (ret)
1735 		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1736 	return ret;
1737 }
1738 
1739 static inline void
1740 netiucv_remove_files(struct device *dev)
1741 {
1742 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1743 	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1744 	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1745 }
1746 
1747 static int
1748 netiucv_register_device(struct net_device *ndev)
1749 {
1750 	struct netiucv_priv *priv = ndev->priv;
1751 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1752 	int ret;
1753 
1754 
1755 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1756 
1757 	if (dev) {
1758 		snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1759 		dev->bus = &iucv_bus;
1760 		dev->parent = iucv_root;
1761 		/*
1762 		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
1767 		 */
1768 		dev->release = (void (*)(struct device *))kfree;
1769 		dev->driver = &netiucv_driver;
1770 	} else
1771 		return -ENOMEM;
1772 
1773 	ret = device_register(dev);
1774 
1775 	if (ret)
1776 		return ret;
1777 	ret = netiucv_add_files(dev);
1778 	if (ret)
1779 		goto out_unreg;
1780 	priv->dev = dev;
1781 	dev->driver_data = priv;
1782 	return 0;
1783 
1784 out_unreg:
1785 	device_unregister(dev);
1786 	return ret;
1787 }
1788 
1789 static void
1790 netiucv_unregister_device(struct device *dev)
1791 {
1792 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1793 	netiucv_remove_files(dev);
1794 	device_unregister(dev);
1795 }
1796 
1797 /**
1798  * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections.
1800  */
1801 static struct iucv_connection *
1802 netiucv_new_connection(struct net_device *dev, char *username)
1803 {
1804 	unsigned long flags;
1805 	struct iucv_connection **clist = &iucv_conns.iucv_connections;
1806 	struct iucv_connection *conn =
1807 		kzalloc(sizeof(struct iucv_connection), GFP_KERNEL);
1808 
1809 	if (conn) {
1810 		skb_queue_head_init(&conn->collect_queue);
1811 		skb_queue_head_init(&conn->commit_queue);
1812 		spin_lock_init(&conn->collect_lock);
1813 		conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1814 		conn->netdev = dev;
1815 
1816 		conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1817 					  GFP_KERNEL | GFP_DMA);
1818 		if (!conn->rx_buff) {
1819 			kfree(conn);
1820 			return NULL;
1821 		}
1822 		conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
1823 					  GFP_KERNEL | GFP_DMA);
1824 		if (!conn->tx_buff) {
1825 			kfree_skb(conn->rx_buff);
1826 			kfree(conn);
1827 			return NULL;
1828 		}
1829 		conn->fsm = init_fsm("netiucvconn", conn_state_names,
1830 				     conn_event_names, NR_CONN_STATES,
1831 				     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1832 				     GFP_KERNEL);
1833 		if (!conn->fsm) {
1834 			kfree_skb(conn->tx_buff);
1835 			kfree_skb(conn->rx_buff);
1836 			kfree(conn);
1837 			return NULL;
1838 		}
1839 		fsm_settimer(conn->fsm, &conn->timer);
1840 		fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1841 
1842 		if (username) {
1843 			memcpy(conn->userid, username, 9);
1844 			fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1845 		}
1846 
1847 		write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
1848 		conn->next = *clist;
1849 		*clist = conn;
1850 		write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
1851 	}
1852 	return conn;
1853 }
1854 
1855 /**
1856  * Release a connection structure and remove it from the
1857  * list of netiucv connections.
1858  */
1859 static void
1860 netiucv_remove_connection(struct iucv_connection *conn)
1861 {
1862 	struct iucv_connection **clist = &iucv_conns.iucv_connections;
1863 	unsigned long flags;
1864 
1865 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1866 	if (conn == NULL)
1867 		return;
1868 	write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
1869 	while (*clist) {
1870 		if (*clist == conn) {
1871 			*clist = conn->next;
1872 			write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
1873 			if (conn->handle) {
1874 				iucv_unregister_program(conn->handle);
1875 				conn->handle = NULL;
1876 			}
1877 			fsm_deltimer(&conn->timer);
1878 			kfree_fsm(conn->fsm);
1879 			kfree_skb(conn->rx_buff);
1880 			kfree_skb(conn->tx_buff);
1881 			return;
1882 		}
1883 		clist = &((*clist)->next);
1884 	}
1885 	write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
1886 }
1887 
1888 /**
1889  * Release all resources held by a net device.
1890  */
1891 static void
1892 netiucv_free_netdevice(struct net_device *dev)
1893 {
1894 	struct netiucv_priv *privptr;
1895 
1896 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1897 
1898 	if (!dev)
1899 		return;
1900 
1901 	privptr = (struct netiucv_priv *)dev->priv;
1902 	if (privptr) {
1903 		if (privptr->conn)
1904 			netiucv_remove_connection(privptr->conn);
1905 		if (privptr->fsm)
1906 			kfree_fsm(privptr->fsm);
1907 		privptr->conn = NULL; privptr->fsm = NULL;
1908 		/* privptr gets freed by free_netdev() */
1909 	}
1910 	free_netdev(dev);
1911 }
1912 
1913 /**
1914  * Initialize a net device. (Called by the kernel from alloc_netdev().)
1915  */
1916 static void
1917 netiucv_setup_netdevice(struct net_device *dev)
1918 {
1919 	memset(dev->priv, 0, sizeof(struct netiucv_priv));
1920 
1921 	dev->mtu	         = NETIUCV_MTU_DEFAULT;
1922 	dev->hard_start_xmit     = netiucv_tx;
1923 	dev->open	         = netiucv_open;
1924 	dev->stop	         = netiucv_close;
1925 	dev->get_stats	         = netiucv_stats;
1926 	dev->change_mtu          = netiucv_change_mtu;
1927 	dev->destructor          = netiucv_free_netdevice;
1928 	dev->hard_header_len     = NETIUCV_HDRLEN;
1929 	dev->addr_len            = 0;
1930 	dev->type                = ARPHRD_SLIP;
1931 	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
1932 	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
1933 	SET_MODULE_OWNER(dev);
1934 }
1935 
1936 /**
1937  * Allocate and initialize a net device and all of its associated structures.
1938  */
1939 static struct net_device *
1940 netiucv_init_netdevice(char *username)
1941 {
1942 	struct netiucv_priv *privptr;
1943 	struct net_device *dev;
1944 
1945 	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1946 			   netiucv_setup_netdevice);
1947 	if (!dev)
1948 		return NULL;
1949 	if (dev_alloc_name(dev, dev->name) < 0) {
1950 		free_netdev(dev);
1951 		return NULL;
1952 	}
1953 
1954 	privptr = (struct netiucv_priv *)dev->priv;
1955 	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1956 				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1957 				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1958 	if (!privptr->fsm) {
1959 		free_netdev(dev);
1960 		return NULL;
1961 	}
1962 	privptr->conn = netiucv_new_connection(dev, username);
1963 	if (!privptr->conn) {
1964 		kfree_fsm(privptr->fsm);
1965 		free_netdev(dev);
1966 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1967 		return NULL;
1968 	}
1969 	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1970 
1971 	return dev;
1972 }
1973 
1974 static ssize_t
1975 conn_write(struct device_driver *drv, const char *buf, size_t count)
1976 {
1977 	char *p;
1978 	char username[9];
1979 	int i, ret;
1980 	struct net_device *dev;
1981 	struct iucv_connection **clist = &iucv_conns.iucv_connections;
1982 	unsigned long flags;
1983 
1984 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
1985 	if (count>9) {
1986 		PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
1987 		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1988 		return -EINVAL;
1989 	}
1990 
1991 	for (i=0, p=(char *)buf; i<8 && *p; i++, p++) {
1992 		if (isalnum(*p) || (*p == '$'))
1993 			username[i]= toupper(*p);
1994 		else if (*p == '\n') {
1995 			/* trailing lf, grr */
1996 			break;
1997 		} else {
1998 			PRINT_WARN("netiucv: Invalid character in username!\n");
1999 			IUCV_DBF_TEXT_(setup, 2,
2000 				"conn_write: invalid character %c\n", *p);
2001 			return -EINVAL;
2002 		}
2003 	}
2004 	while (i<8)
2005 		username[i++] = ' ';
2006 	username[8] = '\0';
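	/*
	 * Example (illustrative values): writing "tcpip\n" to the attribute
	 * yields the canonical user id "TCPIP   ", i.e. uppercased and padded
	 * with blanks to eight characters, as z/VM expects.
	 */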
2007 
2008 	read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
2009 	while (*clist) {
2010 		if (!strncmp(username, (*clist)->userid, 9))
2011 			break;
2012 		clist = &((*clist)->next);
2013 	}
2014 	read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
2015 	if (*clist) {
2016 		PRINT_WARN("netiucv: Connection to %s already exists\n",
2017 			username);
2018 		return -EEXIST;
2019 	}
2020 	dev = netiucv_init_netdevice(username);
2021 	if (!dev) {
2022 		PRINT_WARN(
2023 		       "netiucv: Could not allocate network device structure "
2024 		       "for user '%s'\n", netiucv_printname(username));
2025 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
2026 		return -ENODEV;
2027 	}
2028 
2029 	if ((ret = netiucv_register_device(dev))) {
2030 		IUCV_DBF_TEXT_(setup, 2,
2031 			"ret %d from netiucv_register_device\n", ret);
2032 		goto out_free_ndev;
2033 	}
2034 
2035 	/* sysfs magic */
2036 	SET_NETDEV_DEV(dev,
2037 			(struct device*)((struct netiucv_priv*)dev->priv)->dev);
2038 
2039 	if ((ret = register_netdev(dev))) {
2040 		netiucv_unregister_device((struct device*)
2041 			((struct netiucv_priv*)dev->priv)->dev);
2042 		goto out_free_ndev;
2043 	}
2044 
2045 	PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
2046 
2047 	return count;
2048 
2049 out_free_ndev:
2050 	PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
2051 	IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
2052 	netiucv_free_netdevice(dev);
2053 	return ret;
2054 }
2055 
2056 DRIVER_ATTR(connection, 0200, NULL, conn_write);
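
/*
 * Usage sketch: the attribute is created by netiucv_init() below and, with
 * the usual sysfs layout for driver attributes, should appear roughly as
 * (path shown as an example):
 *
 *   echo "TCPIP" > /sys/bus/iucv/drivers/netiucv/connection
 *
 * which allocates a new iucvN interface whose peer is the z/VM guest TCPIP
 * (an example user id); the IUCV connection itself is established when the
 * interface is brought up.
 */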
2057 
2058 static ssize_t
2059 remove_write(struct device_driver *drv, const char *buf, size_t count)
2060 {
2061 	struct iucv_connection **clist = &iucv_conns.iucv_connections;
2062 	unsigned long flags;
2063 	struct net_device *ndev;
2064 	struct netiucv_priv *priv;
2065 	struct device *dev;
2066 	char name[IFNAMSIZ];
2067 	char *p;
2068 	int i;
2069 
2070 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2071 
2072 	if (count >= IFNAMSIZ)
2073 		count = IFNAMSIZ - 1;
2074 
2075 	for (i=0, p=(char *)buf; i<count && *p; i++, p++) {
2076 		if ((*p == '\n') || (*p == ' ')) {
2077 			/* trailing lf, grr */
2078 			break;
2079 		} else {
2080 			name[i] = *p;
2081 		}
2082 	}
2083 	name[i] = '\0';
2084 
2085 	read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
2086 	while (*clist) {
2087 		ndev = (*clist)->netdev;
2088 		priv = (struct netiucv_priv*)ndev->priv;
2089 		dev = priv->dev;
2090 
2091 		if (strncmp(name, ndev->name, count)) {
2092 			clist = &((*clist)->next);
2093 			continue;
2094 		}
2095 		read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
2096 		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2097 			PRINT_WARN(
2098 				"netiucv: net device %s active with peer %s\n",
2099 				ndev->name, priv->conn->userid);
2100 			PRINT_WARN("netiucv: %s cannot be removed\n",
2101 				ndev->name);
2102 			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2103 			return -EBUSY;
2104 		}
2105 		unregister_netdev(ndev);
2106 		netiucv_unregister_device(dev);
2107 		return count;
2108 	}
2109 	read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
2110 	PRINT_WARN("netiucv: net device %s unknown\n", name);
2111 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2112 	return -EINVAL;
2113 }
2114 
2115 DRIVER_ATTR(remove, 0200, NULL, remove_write);
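
/*
 * Usage sketch, mirroring the connection attribute above (example path):
 *
 *   echo "iucv0" > /sys/bus/iucv/drivers/netiucv/remove
 *
 * removes the named interface again, provided it is not currently
 * IFF_UP/IFF_RUNNING (in which case remove_write() returns -EBUSY).
 */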
2116 
2117 static void
2118 netiucv_banner(void)
2119 {
2120 	PRINT_INFO("NETIUCV driver initialized\n");
2121 }
2122 
2123 static void __exit
2124 netiucv_exit(void)
2125 {
2126 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2127 	while (iucv_conns.iucv_connections) {
2128 		struct net_device *ndev = iucv_conns.iucv_connections->netdev;
2129 		struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
2130 		struct device *dev = priv->dev;
2131 
2132 		unregister_netdev(ndev);
2133 		netiucv_unregister_device(dev);
2134 	}
2135 
2136 	driver_remove_file(&netiucv_driver, &driver_attr_connection);
2137 	driver_remove_file(&netiucv_driver, &driver_attr_remove);
2138 	driver_unregister(&netiucv_driver);
2139 	iucv_unregister_dbf_views();
2140 
2141 	PRINT_INFO("NETIUCV driver unloaded\n");
2142 	return;
2143 }
2144 
2145 static int __init
2146 netiucv_init(void)
2147 {
2148 	int ret;
2149 
2150 	ret = iucv_register_dbf_views();
2151 	if (ret) {
2152 		PRINT_WARN("netiucv_init failed, "
2153 			"iucv_register_dbf_views rc = %d\n", ret);
2154 		return ret;
2155 	}
2156 	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
2157 	ret = driver_register(&netiucv_driver);
2158 	if (ret) {
2159 		PRINT_ERR("NETIUCV: failed to register driver.\n");
2160 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
2161 		iucv_unregister_dbf_views();
2162 		return ret;
2163 	}
2164 
2165 	/* Add entry for specifying connections. */
2166 	ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
2167 	if (!ret) {
2168 		ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
2169 		netiucv_banner();
2170 		rwlock_init(&iucv_conns.iucv_rwlock);
2171 	} else {
2172 		PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
2173 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
2174 		driver_unregister(&netiucv_driver);
2175 		iucv_unregister_dbf_views();
2176 	}
2177 	return ret;
2178 }
2179 
2180 module_init(netiucv_init);
2181 module_exit(netiucv_exit);
2182 MODULE_LICENSE("GPL");
2183