/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

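/*
 * Usage sketch (not part of the driver): a minimal, hypothetical
 * userspace client, assuming the struct sockaddr_iucv layout from
 * <net/iucv/af_iucv.h>.  The name and user-id fields are fixed 8-byte,
 * blank-padded strings; the bind/connect handlers below memcpy exactly
 * 8 bytes of each.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	#ifndef AF_IUCV
 *	#define AF_IUCV 32
 *	#endif
 *
 *	struct sockaddr_iucv {			// mirrors af_iucv.h
 *		sa_family_t	siucv_family;
 *		unsigned short	siucv_port;	// reserved, must be 0
 *		unsigned int	siucv_addr;	// reserved, must be 0
 *		char		siucv_nodeid[8];// reserved
 *		char		siucv_user_id[8];// z/VM guest user id
 *		char		siucv_name[8];	// application name
 *	};
 *
 *	int iucv_client_example(void)		// hypothetical helper
 *	{
 *		struct sockaddr_iucv addr;
 *		int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&addr, 0, sizeof(addr));
 *		addr.siucv_family = AF_IUCV;
 *		memcpy(addr.siucv_user_id, "PEERVM  ", 8);
 *		memcpy(addr.siucv_name, "SRVNAME ", 8);
 *		if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
 *			return -1;
 *		return fd;	// ready for send()/recv()
 *	}
 */
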
#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = RW_LOCK_UNLOCKED,
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

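/*
 * A note on the two helpers above: IUCV connection user data is a
 * 16-byte field.  high_nmcpy() fills bytes 0-7, low_nmcpy() fills
 * bytes 8-15, so callers compose a full tag and then convert it to
 * EBCDIC, for example:
 *
 *	unsigned char user_data[16];
 *
 *	high_nmcpy(user_data, dst_name);	// bytes 0..7:  peer name
 *	low_nmcpy(user_data, src_name);		// bytes 8..15: own name
 *	ASCEBC(user_data, sizeof(user_data));
 */
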
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
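
/*
 * Note on iucv_sock_close(): a connected socket first moves to
 * IUCV_CLOSING so that iucv_callback_txdone() can flip it to
 * IUCV_CLOSED once send_skb_q drains.  If sends are still outstanding,
 * the wait is bounded by sk_lingertime when SO_LINGER is set, otherwise
 * by IUCV_DISCONN_TIMEOUT; only then is the IUCV path severed.
 */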

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
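
/*
 * Note on sk_allocation above: IUCV imposes 31-bit addressing on its
 * message buffers, so socket memory is allocated with GFP_DMA (on
 * s390, ZONE_DMA covers the memory below 2 GB).
 */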

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
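
/*
 * Autobound names are "%08x" renderings of the global autobind
 * counter, e.g. "00000001", retried until no other socket on
 * iucv_sk_list already holds the name.
 */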

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
				sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}
done:
	release_sock(sk);
	return err;
}
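
/*
 * Connect flow: iucv_path_connect() issues the IUCV CONNECT; the
 * peer's answer arrives asynchronously as the path_complete callback
 * (iucv_callback_connack), which sets IUCV_CONNECTED and wakes the
 * waiter above.  A severed path instead surfaces via
 * iucv_callback_connrej as IUCV_DISCONN, mapped here to -ECONNREFUSED.
 */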

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
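
/*
 * Usage sketch (not part of the driver): the matching hypothetical
 * userspace server side, reusing struct sockaddr_iucv from the client
 * sketch near the top of this file.  bind() fixes the 8-byte
 * application name that iucv_callback_connreq() matches against
 * incoming paths.
 *
 *	int iucv_server_example(void)		// hypothetical helper
 *	{
 *		struct sockaddr_iucv addr;
 *		int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&addr, 0, sizeof(addr));
 *		addr.siucv_family = AF_IUCV;
 *		memcpy(addr.siucv_name, "SRVNAME ", 8);	// must match client
 *		if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
 *		    listen(fd, 5) < 0)
 *			return -1;
 *		return accept(fd, NULL, NULL);	// blocks for a peer
 *	}
 */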

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}

	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
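
/*
 * Note on transmit completion: the 32-bit tag assigned above is
 * stashed in skb->cb, and the skb stays on send_skb_q until
 * iucv_callback_txdone() matches the tag and frees it.
 * iucv_sock_close() uses a drained send_skb_q as its "all sent"
 * condition.
 */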

static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
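
/*
 * Worked example for iucv_fragment_skb(): messages are cut into chunks
 * of sk_rcvbuf / 4.  With sk_rcvbuf = 65536, a 70000-byte message is
 * queued on backlog_skb_q as four 16384-byte fragments plus one
 * 4464-byte tail (70000 = 4 * 16384 + 4464).
 */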

static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}

	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

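	/*
	 * The shutdown(2) constants SHUT_RD/SHUT_WR/SHUT_RDWR are 0/1/2;
	 * incrementing maps them onto the kernel's RCV_SHUTDOWN (1),
	 * SEND_SHUTDOWN (2) and SHUTDOWN_MASK (3) bit values.
	 */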
	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
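
/*
 * Note on iucv_callback_connreq(): it runs from the IUCV tasklet, so
 * the child socket is allocated with GFP_ATOMIC, the path is accepted
 * with the listener's names as user data, and the child is queued on
 * the parent's accept_q; sk_data_ready() then wakes any process
 * blocked in iucv_sock_accept().
 */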

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += msg->length + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	spin_lock(&iucv->message_q.lock);
	iucv_process_message(sk, skb, path, msg);
	spin_unlock(&iucv->message_q.lock);

	return;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		return;
	save_msg->path = path;
	save_msg->msg = *msg;

	spin_lock(&iucv->message_q.lock);
	list_add_tail(&save_msg->list, &iucv->message_q.list);
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	/*
	 * Find the queued skb whose tag matches; an unmatched tag must
	 * not unlink anything, and the unlink has to happen under the
	 * queue lock we already hold.
	 */
	this = NULL;
	spin_lock_irqsave(&list->lock, flags);

	while (list_skb != (struct sk_buff *) list) {
		if (!memcmp(&msg->tag, list_skb->cb, 4)) {
			this = list_skb;
			break;
		}
		list_skb = list_skb->next;
	}
	if (this)
		__skb_unlink(this, list);

	spin_unlock_irqrestore(&list->lock, flags);

	kfree_skb(this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);

	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);