// SPDX-License-Identifier: GPL-2.0-only
/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 * PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/filter.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;
static struct iucv_handler af_iucv_handler;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	sizeof_field(struct iucv_message, class)

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
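
/*
 * Worked example of the IPRM convention above (informational only): a
 * 5-byte payload occupies PRMDATA[0..4] and PRMDATA[7] carries the length
 * value 0xff - 5 = 0xfa; iucv_msg_length() then recovers 0xff - 0xfa = 5.
 * Conversely, PRMDATA[7] values below 0xf8 decode to a length above 7,
 * which is reserved for special notifications such as iprm_shutdown.
 */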

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/*
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/*
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	int err, confirm_recv = 0;

	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
	memset(phs_hdr, 0, sizeof(*phs_hdr));
	skb_reset_network_header(skb);

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}

	dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		err = pskb_trim(skb, skb->dev->mtu);
		if (err)
			goto err_free;
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);

	atomic_inc(&iucv->skbs_in_xmit);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		atomic_dec(&iucv->skbs_in_xmit);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}
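
/*
 * Informational note on afiucv_hs_send(): for data frames and explicit WIN
 * frames the current msg_recv count is piggybacked in phs_hdr->window, e.g.
 * three received-but-unconfirmed messages yield window = 3 plus the WIN
 * flag; on successful transmission the same amount is subtracted from
 * msg_recv, so the peer's send credit and the local counter stay in sync.
 */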

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	/* Whoever resets the path pointer, must sever and free it. */
	if (xchg(&iucv->path, NULL)) {
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
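
/*
 * Informational note on iucv_sever_path(): the xchg() atomically claims
 * ownership of the path pointer, so concurrent callers (e.g. a close racing
 * with a sever callback) cannot both reach path_sever()/iucv_path_free();
 * whichever caller reads a non-NULL pointer tears the path down exactly once.
 */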

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) +
	       LL_RESERVED_SPACE(iucv->hs_dev);
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
		fallthrough;

	case IUCV_DISCONN:
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}
		fallthrough;

	case IUCV_CLOSING:
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
		fallthrough;

	default:
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->skbs_in_xmit, 0);
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->init, 0, sizeof(iucv->init));
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

static void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

static struct sock *iucv_accept_dequeue(struct sock *parent,
					struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

static void __iucv_auto_name(struct iucv_sock *iucv)
{
	char name[12];

	scnprintf(name, sizeof(name),
		  "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		scnprintf(name, sizeof(name), "%08x",
			  atomic_inc_return(&iucv_sk_list.autobind_name));
	}
	memcpy(iucv->src_name, name, 8);
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr_unsized *addr,
			  int addr_len)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	char uid[sizeof(sa->siucv_user_id)];
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for unitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		sk->sk_allocation |= GFP_DMA;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
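
/*
 * Illustrative userspace sketch (not part of this module) of the bind step
 * above; the identifiers are made up.  IDs and names are blank-padded to
 * 8 bytes, and an all-blank siucv_name requests an auto-generated name via
 * __iucv_auto_name():
 *
 *	struct sockaddr_iucv sa = {
 *		.siucv_family	= AF_IUCV,
 *		.siucv_user_id	= "LNXGST01",	// own z/VM user ID (8 chars)
 *		.siucv_name	= "        ",	// blank: autobind a name
 *	};
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 */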

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);
	iucv->transport = AF_IUCV_TRANS_IUCV;
	sk->sk_allocation |= GFP_DMA;

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
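
/*
 * Illustrative userspace sketch (not part of this module) of connecting a
 * socket; the peer identifiers are made up and blank-padded to 8 bytes:
 *
 *	struct sockaddr_iucv peer = {
 *		.siucv_family	= AF_IUCV,
 *		.siucv_user_id	= "PEERGST ",	// peer z/VM user ID
 *		.siucv_name	= "APPSRV  ",	// peer application name
 *	};
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 */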

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    struct proto_accept_arg *arg)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *)skb->data, skb->len);
	prmdata[7] = 0xff - (u8)skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				     (void *)prmdata, 8);
}
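
/*
 * Informational example for iucv_send_iprm(): a 3-byte payload "abc" is
 * copied to prmdata[0..2] and prmdata[7] becomes 0xff - 3 = 0xfc, the exact
 * inverse of the decoding performed by iucv_msg_length() on the receive
 * side.
 */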

static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			     size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	size_t headroom = 0;
	size_t linear;
	struct sk_buff *skb;
	struct iucv_message txmsg = {0};
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done = 0;	/* check for duplicate headers */

	/* iterate over control messages */
	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *)CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		headroom = sizeof(struct af_iucv_trans_hdr) +
			   LL_RESERVED_SPACE(iucv->hs_dev);
		linear = min(len, PAGE_SIZE - headroom);
	} else {
		if (len < PAGE_SIZE) {
			linear = len;
		} else {
			/* In nonlinear "classic" iucv skb,
			 * reserve space for iucv_array
			 */
			headroom = sizeof(struct iucv_array) *
				   (MAX_SKB_FRAGS + 1);
			linear = PAGE_SIZE - headroom;
		}
	}
	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
				   noblock, &err, 0);
	if (!skb)
		goto out;
	if (headroom)
		skb_reserve(skb, headroom);
	skb_put(skb, linear);
	skb->len = len;
	skb->data_len = len - linear;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto fail;

	/* wait if the number of outstanding messages for the iucv path has
	 * reached the message limit
	 */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto out;
		}
	} else { /* Classic VM IUCV transport */
		skb_queue_tail(&iucv->send_skb_q, skb);
		atomic_inc(&iucv->skbs_in_xmit);

		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
		    skb->len <= 7) {
			err = iucv_send_iprm(iucv->path, &txmsg, skb);

			/* on success: there is no message_complete callback */
			/* for an IPRMDATA msg; remove skb from send queue */
			if (err == 0) {
				atomic_dec(&iucv->skbs_in_xmit);
				skb_unlink(skb, &iucv->send_skb_q);
				consume_skb(skb);
			}

			/* this error should never happen since the	*/
			/* IUCV_IPRMDATA path flag is set... sever path */
			if (err == 0x15) {
				pr_iucv->path_sever(iucv->path, NULL);
				atomic_dec(&iucv->skbs_in_xmit);
				skb_unlink(skb, &iucv->send_skb_q);
				err = -EPIPE;
				goto fail;
			}
		} else if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			/* skip iucv_array lying in the headroom */
			iba[0].address = virt_to_dma32(skb->data);
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			err = pr_iucv->message_send(iucv->path, &txmsg,
						    IUCV_IPBUFLST, 0,
						    (void *)iba, skb->len);
		} else { /* non-IPRM Linear skb */
			err = pr_iucv->message_send(iucv->path, &txmsg,
					0, 0, (void *)skb->data, skb->len);
		}
		if (err) {
			if (err == 3) {
				user_id[8] = 0;
				memcpy(user_id, iucv->dst_user_id, 8);
				appl_id[8] = 0;
				memcpy(appl_id, iucv->dst_name, 8);
				pr_err("Application %s on z/VM guest %s exceeds message limit\n",
				       appl_id, user_id);
				err = -EAGAIN;
			} else {
				err = -EPIPE;
			}

			atomic_dec(&iucv->skbs_in_xmit);
			skb_unlink(skb, &iucv->send_skb_q);
			goto fail;
		}
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
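
/*
 * Illustrative userspace sketch (not part of this module) of passing the
 * SCM_IUCV_TRGCLS control message consumed by iucv_sock_sendmsg() above;
 * the variable names are made up:
 *
 *	char ctl[CMSG_SPACE(sizeof(__u32))] = { 0 };
 *	struct iovec iov = { .iov_base = data, .iov_len = datalen };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = ctl, .msg_controllen = sizeof(ctl),
 *	};
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *	cm->cmsg_level = SOL_IUCV;
 *	cm->cmsg_type = SCM_IUCV_TRGCLS;
 *	cm->cmsg_len = CMSG_LEN(sizeof(__u32));
 *	memcpy(CMSG_DATA(cm), &trgcls, sizeof(__u32));	// target class
 *	sendmsg(fd, &mh, 0);
 */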

static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
{
	size_t headroom, linear;
	struct sk_buff *skb;
	int err;

	if (len < PAGE_SIZE) {
		headroom = 0;
		linear = len;
	} else {
		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
		linear = PAGE_SIZE - headroom;
	}
	skb = alloc_skb_with_frags(headroom + linear, len - linear,
				   0, &err, GFP_ATOMIC | GFP_DMA);
	WARN_ONCE(!skb,
		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
		  len, err);
	if (skb) {
		if (headroom)
			skb_reserve(skb, headroom);
		skb_put(skb, linear);
		skb->len = len;
		skb->data_len = len - linear;
	}
	return skb;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = virt_to_dma32(skb->data);
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
						      IUCV_IPBUFLST,
						      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
						      msg->flags & IUCV_IPRMDATA,
						      skb->data, len, NULL);
		}
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		sk_drops_inc(sk);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue the
	 * skb; only its refcount is increased.
	 */
	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen = skb->len - offset;	/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		err = -EFAULT;
		goto err_out;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err)
		goto err_out;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		consume_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;
	if (flags & MSG_PEEK)
		skb_unref(skb);

	return copied;

err_out:
	if (!(flags & MSG_PEEK))
		skb_queue_head(&sk->sk_receive_queue, skb);
	else
		skb_unref(skb);

	return err;
}
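
/*
 * Informational note on the receive path above: with the HiperSockets
 * transport, consumed messages are confirmed to the peer in batches rather
 * than one by one.  With a msglimit of, say, 128, a WIN frame is sent once
 * 64 or more messages have been consumed; afiucv_hs_send() then reduces the
 * msg_recv counter by the confirmed amount.
 */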

static inline __poll_t iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	sock_poll_wait(file, sock, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= EPOLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= EPOLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
	    sk->sk_state == IUCV_CONNECTED) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *)iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > U16_MAX)
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
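
/*
 * Illustrative userspace sketch (not part of this module): SO_MSGLIMIT may
 * only be changed while the socket is in IUCV_OPEN or IUCV_BOUND state,
 * i.e. before connect()/listen():
 *
 *	int limit = 256;
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */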

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_IUCV;
	nsk->sk_allocation |= GFP_DMA;

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list;
	struct sk_buff *list_skb;
	struct iucv_sock *iucv;
	unsigned long flags;

	iucv = iucv_sk(sk);
	list = &iucv->send_skb_q;

	bh_lock_sock(sk);

	spin_lock_irqsave(&list->lock, flags);
	skb_queue_walk(list, list_skb) {
		if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
			this = list_skb;
			break;
		}
	}
	if (this) {
		atomic_dec(&iucv->skbs_in_xmit);
		__skb_unlink(this, list);
	}

	spin_unlock_irqrestore(&list->lock, flags);

	if (this) {
		consume_skb(this);
		/* wake up any process waiting for sending */
		iucv_sock_wake_msglim(sk);
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

static struct iucv_handler af_iucv_handler = {
	.path_pending		= iucv_callback_connreq,
	.path_complete		= iucv_callback_connack,
	.path_severed		= iucv_callback_connrej,
	.message_pending	= iucv_callback_rx,
	.message_complete	= iucv_callback_txdone,
	.path_quiesced		= iucv_callback_shutdown,
};

/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/*
 * afiucv_hs_callback_syn - react on received SYN
 */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	iucv = iucv_sk(sk);
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		iucv_sock_kill(nsk);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if receiver acks the xmit connection is established */
	err = dev_queue_xmit(skb);
	if (!err) {
		iucv_accept_enqueue(sk, nsk);
		nsk->sk_state = IUCV_CONNECTED;
		sk->sk_data_ready(sk);
	} else
		iucv_sock_kill(nsk);
	bh_unlock_sock(sk);

out:
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_synack() - react on received SYN-ACK
 */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_synfin() - react on received SYN_FIN
 */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv || sk->sk_state != IUCV_BOUND) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	sk->sk_state = IUCV_DISCONN;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_fin() - react on received FIN
 */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	/* other end of connection closed */
	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	bh_lock_sock(sk);
	if (sk->sk_state == IUCV_CONNECTED) {
		sk->sk_state = IUCV_DISCONN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_win() - react on received WIN
 */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv)
		return NET_RX_SUCCESS;

	if (sk->sk_state != IUCV_CONNECTED)
		return NET_RX_SUCCESS;

	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
	iucv_sock_wake_msglim(sk);
	return NET_RX_SUCCESS;
}

/*
 * afiucv_hs_callback_rx() - react on received data
 */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (!iucv) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	/* write stuff from iucv_msg to skb cb */
	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		sk_drops_inc(sk);	/* skb rejected by filter */
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	spin_lock(&iucv->message_q.lock);
	if (skb_queue_empty(&iucv->backlog_skb_q)) {
		if (__sock_queue_rcv_skb(sk, skb))
			/* handle rcv queue full */
			skb_queue_tail(&iucv->backlog_skb_q, skb);
	} else
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	spin_unlock(&iucv->message_q.lock);
	return NET_RX_SUCCESS;
}
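
/*
 * Summary of the HiperSockets handshake handled by the callbacks above
 * (informational, no new behaviour): the connecting side sends SYN carrying
 * its msglimit as window; the listener replies with SYN|ACK (and its own
 * window) on success, or SYN|FIN to refuse; a plain FIN closes an
 * established connection, and WIN frames return send credits to the peer.
 */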

/*
 * afiucv_hs_rcv() - base function for arriving data through HiperSockets
 *		     transport
 *		     called from netif RX softirq
 */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct iucv_sock *iucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err = NET_RX_SUCCESS;
	char nullstring[8];

	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

	trans_hdr = iucv_trans_hdr(skb);
	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	memset(nullstring, 0, sizeof(nullstring));
	iucv = NULL;
	sk = NULL;
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     nullstring, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		} else {
			if ((!memcmp(&iucv_sk(sk)->src_name,
				     trans_hdr->destAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->src_user_id,
				     trans_hdr->destUserID, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_name,
				     trans_hdr->srcAppName, 8)) &&
			    (!memcmp(&iucv_sk(sk)->dst_user_id,
				     trans_hdr->srcUserID, 8))) {
				iucv = iucv_sk(sk);
				break;
			}
		}
	}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		sk = NULL;

	/* no sock
	   how should we send with no sock
	   1) send without sock no send rc checking?
	   2) introduce default sock to handle these cases

	   SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
	   data -> send FIN
	   SYN|ACK, SYN|FIN, FIN -> no action? */

	switch (trans_hdr->flags) {
	case AF_IUCV_FLAG_SYN:
		/* connect request */
		err = afiucv_hs_callback_syn(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
		/* connect request confirmed */
		err = afiucv_hs_callback_synack(sk, skb);
		break;
	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
		/* connect request refused */
		err = afiucv_hs_callback_synfin(sk, skb);
		break;
	case (AF_IUCV_FLAG_FIN):
		/* close request */
		err = afiucv_hs_callback_fin(sk, skb);
		break;
	case (AF_IUCV_FLAG_WIN):
		err = afiucv_hs_callback_win(sk, skb);
		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
			consume_skb(skb);
			break;
		}
		fallthrough;	/* and receive non-zero length data */
	case (AF_IUCV_FLAG_SHT):
		/* shutdown request */
		fallthrough;	/* and receive zero length data */
	case 0:
		/* plain data frame */
		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
		err = afiucv_hs_callback_rx(sk, skb);
		break;
	default:
		kfree_skb(skb);
	}

	return err;
}

/*
 * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
 *				   transport
 */
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sock_flag(sk, SOCK_ZAPPED))
		return;

	switch (n) {
	case TX_NOTIFY_OK:
		atomic_dec(&iucv->skbs_in_xmit);
		iucv_sock_wake_msglim(sk);
		break;
	case TX_NOTIFY_PENDING:
		atomic_inc(&iucv->pendings);
		break;
	case TX_NOTIFY_DELAYED_OK:
		atomic_dec(&iucv->skbs_in_xmit);
		if (atomic_dec_return(&iucv->pendings) <= 0)
			iucv_sock_wake_msglim(sk);
		break;
	default:
		atomic_dec(&iucv->skbs_in_xmit);
		if (sk->sk_state == IUCV_CONNECTED) {
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

/*
 * afiucv_netdev_event: handle netdev notifier chain events
 */
static int afiucv_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
	struct sock *sk;
	struct iucv_sock *iucv;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
		sk_for_each(sk, &iucv_sk_list.head) {
			iucv = iucv_sk(sk);
			if ((iucv->hs_dev == event_dev) &&
			    (sk->sk_state == IUCV_CONNECTED)) {
				if (event == NETDEV_GOING_DOWN)
					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
				sk->sk_state = IUCV_DISCONN;
				sk->sk_state_change(sk);
			}
		}
		break;
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};

static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

static int __init afiucv_init(void)
{
	int err;

	if (machine_is_vm() && IS_ENABLED(CONFIG_IUCV)) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = &iucv_if;
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = pr_iucv->iucv_register(&af_iucv_handler, 0);
		if (err)
			goto out_sock;
	}

	err = register_netdevice_notifier(&afiucv_netdev_notifier);
	if (err)
		goto out_notifier;

	dev_add_pack(&iucv_packet_type);
	return 0;

out_notifier:
	if (pr_iucv)
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	return err;
}

static void __exit afiucv_exit(void)
{
	if (pr_iucv)
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);

	unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);