/*
 * File: pep.c
 *
 * Phonet pipe protocol end point socket
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Author: Rémi Denis-Courmont
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/ioctls.h>

#include <linux/phonet.h>
#include <linux/module.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/gprs.h>

/* sk_state values:
 * TCP_CLOSE		sock not in use yet
 * TCP_CLOSE_WAIT	disconnected pipe
 * TCP_LISTEN		listening pipe endpoint
 * TCP_SYN_RECV		connected pipe in disabled state
 * TCP_ESTABLISHED	connected pipe in enabled state
 *
 * pep_sock locking:
 *  - sk_state, hlist: sock lock needed
 *  - listener: read only
 *  - pipe_handle: read only
 */

#define CREDITS_MAX	10
#define CREDITS_THR	7

#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */

/* Get the next TLV sub-block. */
static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
				 void *buf)
{
	void *data = NULL;
	struct {
		u8 sb_type;
		u8 sb_len;
	} *ph, h;
	int buflen = *plen;

	ph = skb_header_pointer(skb, 0, 2, &h);
	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
		return NULL;
	ph->sb_len -= 2;
	*ptype = ph->sb_type;
	*plen = ph->sb_len;

	if (buflen > ph->sb_len)
		buflen = ph->sb_len;
	data = skb_header_pointer(skb, 2, buflen, buf);
	__skb_pull(skb, 2 + ph->sb_len);
	return data;
}

static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
				     int len, gfp_t priority)
{
	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return NULL;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, payload, len);
	__skb_push(skb, sizeof(struct pnpipehdr));
	skb_reset_transport_header(skb);
	return skb;
}

static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
		     const void *data, int len, gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct pnpipehdr *ph;
	struct sk_buff *skb;
	struct sockaddr_pn peer;

	skb = pep_alloc_skb(sk, data, len, priority);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
	ph->pipe_handle = oph->pipe_handle;
	ph->error_code = code;

	pn_skb_get_src_sockaddr(oskb, &peer);
	return pn_skb_send(sk, skb, &peer);
}

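/* For reference: every message handled in this file starts with the common
 * pipe header, struct pnpipehdr (defined in <net/phonet/pep.h>, which is
 * authoritative).  As used here it carries:
 *
 *	utid		user transaction id, echoed back in responses
 *	message_id	PNS_PEP_* or PNS_PIPE_* message type
 *	pipe_handle	pipe identifier
 *	data[0]		one message-dependent byte, aliased as error_code,
 *			state_after_connect/reset or pep_type depending on
 *			the message
 *
 * pep_alloc_skb() reserves room for this header; callers fill it in.
 */
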
static int pep_indicate(struct sock *sk, u8 id, u8 code,
			const void *data, int len, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = pep_alloc_skb(sk, data, len, priority);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = id;
	ph->pipe_handle = pn->pipe_handle;
	ph->data[0] = code;
	return pn_skb_send(sk, skb, NULL);
}

#define PAD 0x00

static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
				const void *data, int len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = id; /* whatever */
	ph->message_id = id;
	ph->pipe_handle = pn->pipe_handle;
	ph->data[0] = code;
	return pn_skb_send(sk, skb, NULL);
}

static int pipe_handler_send_created_ind(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	u8 data[4] = {
		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
		pn->tx_fc, pn->rx_fc,
	};

	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
			    data, 4, GFP_ATOMIC);
}

static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
{
	static const u8 data[20] = {
		PAD, PAD, PAD, 2 /* sub-blocks */,
		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
	};

	might_sleep();
	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
			 GFP_KERNEL);
}

static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
			   gfp_t priority)
{
	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
	WARN_ON(code == PN_PIPE_NO_ERROR);
	return pep_reply(sk, skb, code, data, sizeof(data), priority);
}

/* Control requests are not sent by the pipe service and have a specific
 * message format. */
static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
			     gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct sk_buff *skb;
	struct pnpipehdr *ph;
	struct sockaddr_pn dst;
	u8 data[4] = {
		oph->data[0], /* PEP type */
		code, /* error code, at an unusual offset */
		PAD, PAD,
	};

	skb = pep_alloc_skb(sk, data, 4, priority);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = PNS_PEP_CTRL_RESP;
	ph->pipe_handle = oph->pipe_handle;
	ph->data[0] = oph->data[1]; /* CTRL id */

	pn_skb_get_src_sockaddr(oskb, &dst);
	return pn_skb_send(sk, skb, &dst);
}

static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
{
	u8 data[4] = { type, PAD, PAD, status };

	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
			    data, 4, priority);
}

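/* Receive-side flow control bookkeeping: rx_credits is the number of data
 * PDUs the peer may still send us.  With CREDITS_MAX = 10 and
 * CREDITS_THR = 7, the multi-credit case below only tops the peer up (back
 * to 10 credits) once no more than CREDITS_MAX - CREDITS_THR = 3 credits
 * remain outstanding, so we do not send a status indication for every
 * received packet.
 */
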
/* Send our RX flow control information to the sender.
 * Socket must be locked. */
static void pipe_grant_credits(struct sock *sk, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);

	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	switch (pn->rx_fc) {
	case PN_LEGACY_FLOW_CONTROL: /* TODO */
		break;
	case PN_ONE_CREDIT_FLOW_CONTROL:
		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
					PEP_IND_READY, priority) == 0)
			pn->rx_credits = 1;
		break;
	case PN_MULTI_CREDIT_FLOW_CONTROL:
		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
			break;
		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
					CREDITS_MAX - pn->rx_credits,
					priority) == 0)
			pn->rx_credits = CREDITS_MAX;
		break;
	}
}

static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	int wake = 0;

	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP type: %u\n",
				(unsigned int)hdr->data[0]);
		return -EOPNOTSUPP;
	}

	switch (hdr->data[1]) {
	case PN_PEP_IND_FLOW_CONTROL:
		switch (pn->tx_fc) {
		case PN_LEGACY_FLOW_CONTROL:
			switch (hdr->data[4]) {
			case PEP_IND_BUSY:
				atomic_set(&pn->tx_credits, 0);
				break;
			case PEP_IND_READY:
				atomic_set(&pn->tx_credits, wake = 1);
				break;
			}
			break;
		case PN_ONE_CREDIT_FLOW_CONTROL:
			if (hdr->data[4] == PEP_IND_READY)
				atomic_set(&pn->tx_credits, wake = 1);
			break;
		}
		break;

	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
			break;
		atomic_add(wake = hdr->data[4], &pn->tx_credits);
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP indication: %u\n",
				(unsigned int)hdr->data[1]);
		return -EOPNOTSUPP;
	}
	if (wake)
		sk->sk_write_space(sk);
	return 0;
}

static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	u8 n_sb = hdr->data[0];

	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	__skb_pull(skb, sizeof(*hdr));
	while (n_sb > 0) {
		u8 type, buf[2], len = sizeof(buf);
		u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;
		switch (type) {
		case PN_PIPE_SB_NEGOTIATED_FC:
			if (len < 2 || (data[0] | data[1]) > 3)
				break;
			pn->tx_fc = data[0] & 3;
			pn->rx_fc = data[1] & 3;
			break;
		}
		n_sb--;
	}
	return 0;
}

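/* Negotiation parameters travel in TLV sub-blocks parsed by pep_get_sb():
 * a one-byte type and a one-byte length covering the whole sub-block
 * (header, payload and padding, rounded up to a 32-bit boundary by
 * pep_sb_size()), followed by the payload.  For example, the
 * PNS_PIPE_CREATED_IND built by pipe_handler_send_created_ind() carries a
 * single sub-block { PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2), tx_fc, rx_fc }.
 */
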
/* Queue an skb to a connected sock.
 * Socket lock must be held. */
static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	struct sk_buff_head *queue;
	int err = 0;

	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		sk->sk_state = TCP_CLOSE_WAIT;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case PNS_PEP_ENABLE_REQ:
		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
		switch (hdr->state_after_reset) {
		case PN_PIPE_DISABLE:
			pn->init_enable = 0;
			break;
		case PN_PIPE_ENABLE:
			pn->init_enable = 1;
			break;
		default: /* not allowed to send an error here!? */
			err = -EINVAL;
			goto out;
		}
		/* fall through */
	case PNS_PEP_DISABLE_REQ:
		atomic_set(&pn->tx_credits, 0);
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
			atomic_inc(&sk->sk_drops);
			break;
		}
		__skb_pull(skb, 4);
		queue = &pn->ctrlreq_queue;
		goto queue;

	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);
		/* fall through */
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return NET_RX_SUCCESS;
			err = -ENOBUFS;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = -ENOBUFS;
			break;
		}
		pn->rx_credits--;
		queue = &sk->sk_receive_queue;
		goto queue;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;

	case PNS_PIPE_REDIRECTED_IND:
		err = pipe_rcv_created(sk, skb);
		break;

	case PNS_PIPE_CREATED_IND:
		err = pipe_rcv_created(sk, skb);
		if (err)
			break;
		/* fall through */
	case PNS_PIPE_RESET_IND:
		if (!pn->init_enable)
			break;
		/* fall through */
	case PNS_PIPE_ENABLED_IND:
		if (!pn_flow_safe(pn->tx_fc)) {
			atomic_set(&pn->tx_credits, 1);
			sk->sk_write_space(sk);
		}
		if (sk->sk_state == TCP_ESTABLISHED)
			break; /* Nothing to do */
		sk->sk_state = TCP_ESTABLISHED;
		pipe_grant_credits(sk, GFP_ATOMIC);
		break;

	case PNS_PIPE_DISABLED_IND:
		sk->sk_state = TCP_SYN_RECV;
		pn->rx_credits = 0;
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP message: %u\n",
				hdr->message_id);
		err = -EINVAL;
	}
out:
	kfree_skb(skb);
	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;

queue:
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	err = skb->len;
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, err);
	return NET_RX_SUCCESS;
}

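/* Summary of the passive pipe state machine driven by pipe_do_rcv() above:
 * PNS_PIPE_ENABLED_IND (or CREATED/RESET_IND when init_enable is set) moves
 * the sock to TCP_ESTABLISHED and grants RX credits, PNS_PIPE_DISABLED_IND
 * drops it back to TCP_SYN_RECV, and PNS_PEP_DISCONNECT_REQ leaves it in
 * TCP_CLOSE_WAIT until user space closes the socket.
 */
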
/* Destroy connected sock. */
static void pipe_destruct(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&pn->ctrlreq_queue);
}

static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
{
	unsigned int i;
	u8 final_fc = PN_NO_FLOW_CONTROL;

	for (i = 0; i < n; i++) {
		u8 fc = fcs[i];

		if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
			final_fc = fc;
	}
	return final_fc;
}

static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	u8 n_sb;

	if (!pskb_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	if (hdr->error_code != PN_PIPE_NO_ERROR)
		return -ECONNREFUSED;

	/* Parse sub-blocks */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[6], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;

		switch (type) {
		case PN_PIPE_SB_REQUIRED_FC_TX:
			if (len < 2 || len < data[0])
				break;
			pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
			break;

		case PN_PIPE_SB_PREFERRED_FC_RX:
			if (len < 2 || len < data[0])
				break;
			pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
			break;
		}
		n_sb--;
	}

	return pipe_handler_send_created_ind(sk);
}

static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pnpipehdr *hdr = pnp_hdr(skb);

	if (hdr->error_code != PN_PIPE_NO_ERROR)
		return -ECONNREFUSED;

	return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
			    NULL, 0, GFP_ATOMIC);
}

static void pipe_start_flow_control(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	if (!pn_flow_safe(pn->tx_fc)) {
		atomic_set(&pn->tx_credits, 1);
		sk->sk_write_space(sk);
	}
	pipe_grant_credits(sk, GFP_ATOMIC);
}

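/* An actively connect()ed sock (a "pipe handler") receives through
 * pipe_handler_do_rcv() below, whereas a sock accepted from a listener
 * receives through pipe_do_rcv() above; pep_sock_close() tells the two
 * apart by checking sk_backlog_rcv.
 */
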
/* Queue an skb to an actively connected sock.
 * Socket lock must be held. */
static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	int err = NET_RX_SUCCESS;

	switch (hdr->message_id) {
	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);
		/* fall through */
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return NET_RX_SUCCESS;
			err = NET_RX_DROP;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = NET_RX_DROP;
			break;
		}
		pn->rx_credits--;
		skb->dev = NULL;
		skb_set_owner_r(skb, sk);
		err = skb->len;
		skb_queue_tail(&sk->sk_receive_queue, skb);
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, err);
		return NET_RX_SUCCESS;

	case PNS_PEP_CONNECT_RESP:
		if (sk->sk_state != TCP_SYN_SENT)
			break;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		if (pep_connresp_rcv(sk, skb)) {
			sk->sk_state = TCP_CLOSE_WAIT;
			break;
		}
		if (pn->init_enable == PN_PIPE_DISABLE)
			sk->sk_state = TCP_SYN_RECV;
		else {
			sk->sk_state = TCP_ESTABLISHED;
			pipe_start_flow_control(sk);
		}
		break;

	case PNS_PEP_ENABLE_RESP:
		if (sk->sk_state != TCP_SYN_SENT)
			break;

		if (pep_enableresp_rcv(sk, skb)) {
			sk->sk_state = TCP_CLOSE_WAIT;
			break;
		}

		sk->sk_state = TCP_ESTABLISHED;
		pipe_start_flow_control(sk);
		break;

	case PNS_PEP_DISCONNECT_RESP:
		/* sock should already be dead, nothing to do */
		break;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;
	}
	kfree_skb(skb);
	return err;
}

/* Listening sock must be locked */
static struct sock *pep_find_pipe(const struct hlist_head *hlist,
				  const struct sockaddr_pn *dst,
				  u8 pipe_handle)
{
	struct hlist_node *node;
	struct sock *sknode;
	u16 dobj = pn_sockaddr_get_object(dst);

	sk_for_each(sknode, node, hlist) {
		struct pep_sock *pnnode = pep_sk(sknode);

		/* Ports match, but addresses might not: */
		if (pnnode->pn_sk.sobject != dobj)
			continue;
		if (pnnode->pipe_handle != pipe_handle)
			continue;
		if (sknode->sk_state == TCP_CLOSE_WAIT)
			continue;

		sock_hold(sknode);
		return sknode;
	}
	return NULL;
}

/*
 * Deliver an skb to a listening sock.
 * Socket lock must be held.
 * We then queue the skb to the right connected sock (if any).
 */
static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *sknode;
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst;
	u8 pipe_handle;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
		goto drop;

	pn_skb_get_dst_sockaddr(skb, &dst);

	/* Look for an existing pipe handle */
	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
	if (sknode)
		return sk_receive_skb(sknode, skb, 1);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
					GFP_ATOMIC);
			break;
		}
		skb_queue_head(&sk->sk_receive_queue, skb);
		sk_acceptq_added(sk);
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, 0);
		return NET_RX_SUCCESS;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
	case PNS_PEP_ENABLE_REQ:
	case PNS_PEP_DISABLE_REQ:
		/* invalid handle is not even allowed here! */
		break;

	default:
		if ((1 << sk->sk_state)
			& ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
			/* actively connected socket */
			return pipe_handler_do_rcv(sk, skb);
	}
drop:
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int pipe_do_remove(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PIPE_REMOVE_REQ;
	ph->pipe_handle = pn->pipe_handle;
	ph->data[0] = PAD;
	return pn_skb_send(sk, skb, NULL);
}

/* associated socket ceases to exist */
static void pep_sock_close(struct sock *sk, long timeout)
{
	struct pep_sock *pn = pep_sk(sk);
	int ifindex = 0;

	sock_hold(sk); /* keep a reference after sk_common_release() */
	sk_common_release(sk);

	lock_sock(sk);
	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
		if (sk->sk_backlog_rcv == pipe_do_rcv)
			/* Forcefully remove dangling Phonet pipe */
			pipe_do_remove(sk);
		else
			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
						NULL, 0);
	}
	sk->sk_state = TCP_CLOSE;

	ifindex = pn->ifindex;
	pn->ifindex = 0;
	release_sock(sk);

	if (ifindex)
		gprs_detach(sk);
	sock_put(sk);
}

static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
{
	struct pep_sock *pn = pep_sk(sk), *newpn;
	struct sock *newsk = NULL;
	struct sk_buff *skb;
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst, src;
	int err;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;
	u8 aligned = 0;

	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
	if (!skb)
		return NULL;

	lock_sock(sk);
	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto drop;
	}
	sk_acceptq_removed(sk);

	err = -EPROTO;
	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		goto drop;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
				GFP_KERNEL);
		goto drop;
	}
	peer_type = hdr->other_pep_type << 8;

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			goto drop;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				goto drop;
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		case PN_PIPE_SB_ALIGNED_DATA:
			aligned = data[0] != 0;
			break;
		}
		n_sb--;
	}

	/* Check for duplicate pipe handle */
	pn_skb_get_dst_sockaddr(skb, &dst);
	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
	if (unlikely(newsk)) {
		__sock_put(newsk);
		newsk = NULL;
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
		goto drop;
	}

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
	if (!newsk) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
		err = -ENOBUFS;
		goto drop;
	}

	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	newpn = pep_sk(newsk);
	pn_skb_get_src_sockaddr(skb, &src);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
	sock_hold(sk);
	newpn->listener = sk;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->ifindex = 0;
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;
	newpn->aligned = aligned;

	err = pep_accept_conn(newsk, skb);
	if (err) {
		sock_put(newsk);
		newsk = NULL;
		goto drop;
	}
	sk_add_node(newsk, &pn->hlist);
drop:
	release_sock(sk);
	kfree_skb(skb);
	*errp = err;
	return newsk;
}

static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
{
	struct pep_sock *pn = pep_sk(sk);
	int err;
	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };

	if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
		pn->pipe_handle = 1; /* anything but INVALID_HANDLE */

	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
				   pn->init_enable, data, 4);
	if (err) {
		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
		return err;
	}

	sk->sk_state = TCP_SYN_SENT;

	return 0;
}

static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
{
	int err;

	err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
				   NULL, 0);
	if (err)
		return err;

	sk->sk_state = TCP_SYN_SENT;

	return 0;
}

static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct pep_sock *pn = pep_sk(sk);
	int answ;
	int ret = -ENOIOCTLCMD;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN) {
			ret = -EINVAL;
			break;
		}

		lock_sock(sk);
		if (sock_flag(sk, SOCK_URGINLINE) &&
		    !skb_queue_empty(&pn->ctrlreq_queue))
			answ = skb_peek(&pn->ctrlreq_queue)->len;
		else if (!skb_queue_empty(&sk->sk_receive_queue))
			answ = skb_peek(&sk->sk_receive_queue)->len;
		else
			answ = 0;
		release_sock(sk);
		ret = put_user(answ, (int __user *)arg);
		break;

	case SIOCPNENABLEPIPE:
		lock_sock(sk);
		if (sk->sk_state == TCP_SYN_SENT)
			ret = -EBUSY;
		else if (sk->sk_state == TCP_ESTABLISHED)
			ret = -EISCONN;
		else
			ret = pep_sock_enable(sk, NULL, 0);
		release_sock(sk);
		break;
	}

	return ret;
}

static int pep_init(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	sk->sk_destruct = pipe_destruct;
	INIT_HLIST_HEAD(&pn->hlist);
	pn->listener = NULL;
	skb_queue_head_init(&pn->ctrlreq_queue);
	atomic_set(&pn->tx_credits, 0);
	pn->ifindex = 0;
	pn->peer_type = 0;
	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
	pn->rx_credits = 0;
	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	pn->init_enable = 1;
	pn->aligned = 0;
	return 0;
}

static int pep_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int val = 0, err = 0;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (optlen >= sizeof(int)) {
		if (get_user(val, (int __user *) optval))
			return -EFAULT;
	}

	lock_sock(sk);
	switch (optname) {
	case PNPIPE_ENCAP:
		if (val && val != PNPIPE_ENCAP_IP) {
			err = -EINVAL;
			break;
		}
		if (!pn->ifindex == !val)
			break; /* Nothing to do! */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (val) {
			release_sock(sk);
			err = gprs_attach(sk);
			if (err > 0) {
				pn->ifindex = err;
				err = 0;
			}
		} else {
			pn->ifindex = 0;
			release_sock(sk);
			gprs_detach(sk);
			err = 0;
		}
		goto out_norel;

	case PNPIPE_HANDLE:
		if ((sk->sk_state == TCP_CLOSE) &&
		    (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
			pn->pipe_handle = val;
		else
			err = -EINVAL;
		break;

	case PNPIPE_INITSTATE:
		pn->init_enable = !!val;
		break;

	default:
		err = -ENOPROTOOPT;
	}
	release_sock(sk);

out_norel:
	return err;
}

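/* Illustrative user space sequence for setting up a pipe with the socket
 * options above (sketch only: error handling and the remote Phonet address
 * are omitted, and the handle value is arbitrary):
 *
 *	int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
 *	int handle = 42, enable = 0;
 *	setsockopt(fd, SOL_PNPIPE, PNPIPE_HANDLE, &handle, sizeof(handle));
 *	setsockopt(fd, SOL_PNPIPE, PNPIPE_INITSTATE, &enable, sizeof(enable));
 *	connect(fd, (struct sockaddr *)&remote_pep, sizeof(remote_pep));
 *	ioctl(fd, SIOCPNENABLEPIPE, 0);
 *
 * PNPIPE_HANDLE may only be set while the sock is still closed, and
 * SIOCPNENABLEPIPE is only needed when the pipe was created disabled.
 */
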
static int pep_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int len, val;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case PNPIPE_ENCAP:
		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
		break;

	case PNPIPE_IFINDEX:
		val = pn->ifindex;
		break;

	case PNPIPE_HANDLE:
		val = pn->pipe_handle;
		if (val == PN_PIPE_INVALID_HANDLE)
			return -EINVAL;
		break;

	case PNPIPE_INITSTATE:
		val = pn->init_enable;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(val, (int __user *) optval))
		return -EFAULT;
	return 0;
}

static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	int err;

	if (pn_flow_safe(pn->tx_fc) &&
	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_push(skb, 3 + pn->aligned);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	if (pn->aligned) {
		ph->message_id = PNS_PIPE_ALIGNED_DATA;
		ph->data[0] = 0; /* padding */
	} else
		ph->message_id = PNS_PIPE_DATA;
	ph->pipe_handle = pn->pipe_handle;
	err = pn_skb_send(sk, skb, NULL);

	if (err && pn_flow_safe(pn->tx_fc))
		atomic_inc(&pn->tx_credits);
	return err;
}

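/* Outgoing data PDUs built by pipe_skb_send() above consist of the 3-byte
 * pipe header (utid, PNS_PIPE_DATA, pipe_handle) immediately followed by
 * the payload; on pipes negotiated with aligned data, PNS_PIPE_ALIGNED_DATA
 * is used instead and one padding byte follows the header so that it totals
 * four bytes.
 */
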
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sk_buff *skb;
	long timeo;
	int flags = msg->msg_flags;
	int err, done;

	if (len > USHRT_MAX)
		return -EMSGSIZE;

	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
				MSG_CMSG_COMPAT)) ||
			!(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
					flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err < 0)
		goto outfree;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
		err = -ENOTCONN;
		goto out;
	}
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Wait until the pipe gets to enabled state */
disabled:
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		if (sk->sk_state == TCP_CLOSE_WAIT) {
			err = -ECONNRESET;
			goto out;
		}
	}
	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	/* Wait until flow control allows TX */
	done = atomic_read(&pn->tx_credits);
	while (!done) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EAGAIN;
			goto out;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait(sk_sleep(sk), &wait,
				TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
		finish_wait(sk_sleep(sk), &wait);

		if (sk->sk_state != TCP_ESTABLISHED)
			goto disabled;
	}

	err = pipe_skb_send(sk, skb);
	if (err >= 0)
		err = len; /* success! */
	skb = NULL;
out:
	release_sock(sk);
outfree:
	kfree_skb(skb);
	return err;
}

int pep_writeable(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	return atomic_read(&pn->tx_credits);
}

int pep_write(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *rskb, *fs;
	int flen = 0;

	if (pep_sk(sk)->aligned)
		return pipe_skb_send(sk, skb);

	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
	if (!rskb) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	skb_shinfo(rskb)->frag_list = skb;
	rskb->len += skb->len;
	rskb->data_len += rskb->len;
	rskb->truesize += rskb->len;

	/* Avoid nested fragments */
	skb_walk_frags(skb, fs)
		flen += fs->len;
	skb->next = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);
	skb->len -= flen;
	skb->data_len -= flen;
	skb->truesize -= flen;

	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
	return pipe_skb_send(sk, rskb);
}

struct sk_buff *pep_read(struct sock *sk)
{
	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);

	if (sk->sk_state == TCP_ESTABLISHED)
		pipe_grant_credits(sk, GFP_ATOMIC);
	return skb;
}

static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
		       struct msghdr *msg, size_t len, int noblock,
		       int flags, int *addr_len)
{
	struct sk_buff *skb;
	int err;

	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
		return -ENOTCONN;

	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
		/* Dequeue and acknowledge control request */
		struct pep_sock *pn = pep_sk(sk);

		if (flags & MSG_PEEK)
			return -EOPNOTSUPP;
		skb = skb_dequeue(&pn->ctrlreq_queue);
		if (skb) {
			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
						GFP_KERNEL);
			msg->msg_flags |= MSG_OOB;
			goto copy;
		}
		if (flags & MSG_OOB)
			return -EINVAL;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	lock_sock(sk);
	if (skb == NULL) {
		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
			err = -ECONNRESET;
		release_sock(sk);
		return err;
	}

	if (sk->sk_state == TCP_ESTABLISHED)
		pipe_grant_credits(sk, GFP_KERNEL);
	release_sock(sk);
copy:
	msg->msg_flags |= MSG_EOR;
	if (skb->len > len)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = skb->len;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
	if (!err)
		err = (flags & MSG_TRUNC) ? skb->len : len;

	skb_free_datagram(sk, skb);
	return err;
}

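/* pep_sock_unhash() below must also prune an accepted sock from its
 * listener's hlist.  It locks the child first, detaches the listener
 * pointer, releases the child and only then locks the parent, so the two
 * sock locks are never held at the same time.
 */
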
static void pep_sock_unhash(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *skparent = NULL;

	lock_sock(sk);

	if (pn->listener != NULL) {
		skparent = pn->listener;
		pn->listener = NULL;
		release_sock(sk);

		pn = pep_sk(skparent);
		lock_sock(skparent);
		sk_del_node_init(sk);
		sk = skparent;
	}

	/* Unhash a listening sock only when it is closed
	 * and all of its active connected pipes are closed. */
	if (hlist_empty(&pn->hlist))
		pn_sock_unhash(&pn->pn_sk.sk);
	release_sock(sk);

	if (skparent)
		sock_put(skparent);
}

static struct proto pep_proto = {
	.close		= pep_sock_close,
	.accept		= pep_sock_accept,
	.connect	= pep_sock_connect,
	.ioctl		= pep_ioctl,
	.init		= pep_init,
	.setsockopt	= pep_setsockopt,
	.getsockopt	= pep_getsockopt,
	.sendmsg	= pep_sendmsg,
	.recvmsg	= pep_recvmsg,
	.backlog_rcv	= pep_do_rcv,
	.hash		= pn_sock_hash,
	.unhash		= pep_sock_unhash,
	.get_port	= pn_sock_get_port,
	.obj_size	= sizeof(struct pep_sock),
	.owner		= THIS_MODULE,
	.name		= "PNPIPE",
};

static struct phonet_protocol pep_pn_proto = {
	.ops		= &phonet_stream_ops,
	.prot		= &pep_proto,
	.sock_type	= SOCK_SEQPACKET,
};

static int __init pep_register(void)
{
	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
}

static void __exit pep_unregister(void)
{
	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
}

module_init(pep_register);
module_exit(pep_unregister);
MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
MODULE_DESCRIPTION("Phonet pipe protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);