xref: /linux/net/phonet/pep.c (revision 60e13231561b3a4c5269bfa1ef6c0569ad6f28ec)
1 /*
2  * File: pep.c
3  *
4  * Phonet pipe protocol end point socket
5  *
6  * Copyright (C) 2008 Nokia Corporation.
7  *
8  * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License
12  * version 2 as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22  * 02110-1301 USA
23  */
24 
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/socket.h>
28 #include <net/sock.h>
29 #include <net/tcp_states.h>
30 #include <asm/ioctls.h>
31 
32 #include <linux/phonet.h>
33 #include <net/phonet/phonet.h>
34 #include <net/phonet/pep.h>
35 #include <net/phonet/gprs.h>
36 
37 /* sk_state values:
38  * TCP_CLOSE		sock not in use yet
39  * TCP_CLOSE_WAIT	disconnected pipe
40  * TCP_LISTEN		listening pipe endpoint
41  * TCP_SYN_RECV		connected pipe in disabled state
42  * TCP_ESTABLISHED	connected pipe in enabled state
43  *
44  * pep_sock locking:
45  *  - sk_state, hlist: sock lock needed
46  *  - listener: read only
47  *  - pipe_handle: read only
48  */
49 
50 #define CREDITS_MAX	10
51 #define CREDITS_THR	7
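/*
 * Receive credits granted to the peer are capped at CREDITS_MAX.  In
 * multi-credit mode, pipe_grant_credits() only tops the grant back up to
 * CREDITS_MAX once at least CREDITS_THR credits have been consumed
 * (i.e. rx_credits + CREDITS_THR <= CREDITS_MAX), which batches the
 * PNS_PEP_STATUS_IND traffic instead of re-granting after every packet.
 */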
52 
53 #define pep_sb_size(s) (((s) + 5) & ~3) /* 2-byte header, 32-bit aligned */
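/*
 * Example: pep_sb_size(5) = (5 + 5) & ~3 = 8, i.e. a 2-byte header plus a
 * 5-byte payload rounded up to the next 32-bit boundary; pep_sb_size(2)
 * likewise yields 4.  These are the sb_len values used for the sub-blocks
 * built below.
 */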
54 
55 /* Get the next TLV sub-block. */
56 static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
57 					void *buf)
58 {
59 	void *data = NULL;
60 	struct {
61 		u8 sb_type;
62 		u8 sb_len;
63 	} *ph, h;
64 	int buflen = *plen;
65 
66 	ph = skb_header_pointer(skb, 0, 2, &h);
67 	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
68 		return NULL;
69 	ph->sb_len -= 2;
70 	*ptype = ph->sb_type;
71 	*plen = ph->sb_len;
72 
73 	if (buflen > ph->sb_len)
74 		buflen = ph->sb_len;
75 	data = skb_header_pointer(skb, 2, buflen, buf);
76 	__skb_pull(skb, 2 + ph->sb_len);
77 	return data;
78 }
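
/*
 * A sub-block, as parsed above, is one type byte, one length byte and a
 * payload; the length byte counts the whole sub-block including its own
 * 2-byte header, hence the sb_len -= 2 adjustment before the payload
 * length is reported back through *plen.
 */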
79 
80 static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
81 					int len, gfp_t priority)
82 {
83 	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
84 	if (!skb)
85 		return NULL;
86 	skb_set_owner_w(skb, sk);
87 
88 	skb_reserve(skb, MAX_PNPIPE_HEADER);
89 	__skb_put(skb, len);
90 	skb_copy_to_linear_data(skb, payload, len);
91 	__skb_push(skb, sizeof(struct pnpipehdr));
92 	skb_reset_transport_header(skb);
93 	return skb;
94 }
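
/*
 * The skb returned above still has MAX_PNPIPE_HEADER bytes of headroom
 * minus the pnpipehdr that was pushed back on, so the lower Phonet layers
 * can prepend their own headers without reallocating.  Callers fill in the
 * pnpipehdr fields through pnp_hdr() before handing the skb to
 * pn_skb_send().
 */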
95 
96 static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
97 			const void *data, int len, gfp_t priority)
98 {
99 	const struct pnpipehdr *oph = pnp_hdr(oskb);
100 	struct pnpipehdr *ph;
101 	struct sk_buff *skb;
102 	struct sockaddr_pn peer;
103 
104 	skb = pep_alloc_skb(sk, data, len, priority);
105 	if (!skb)
106 		return -ENOMEM;
107 
108 	ph = pnp_hdr(skb);
109 	ph->utid = oph->utid;
110 	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
111 	ph->pipe_handle = oph->pipe_handle;
112 	ph->error_code = code;
113 
114 	pn_skb_get_src_sockaddr(oskb, &peer);
115 	return pn_skb_send(sk, skb, &peer);
116 }
117 
118 static int pep_indicate(struct sock *sk, u8 id, u8 code,
119 			const void *data, int len, gfp_t priority)
120 {
121 	struct pep_sock *pn = pep_sk(sk);
122 	struct pnpipehdr *ph;
123 	struct sk_buff *skb;
124 
125 	skb = pep_alloc_skb(sk, data, len, priority);
126 	if (!skb)
127 		return -ENOMEM;
128 
129 	ph = pnp_hdr(skb);
130 	ph->utid = 0;
131 	ph->message_id = id;
132 	ph->pipe_handle = pn->pipe_handle;
133 	ph->data[0] = code;
134 	return pn_skb_send(sk, skb, NULL);
135 }
136 
137 #define PAD 0x00
138 
139 static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
140 				const void *data, int len)
141 {
142 	struct pep_sock *pn = pep_sk(sk);
143 	struct pnpipehdr *ph;
144 	struct sk_buff *skb;
145 
146 	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
147 	if (!skb)
148 		return -ENOMEM;
149 
150 	ph = pnp_hdr(skb);
151 	ph->utid = id; /* any value will do; reuse the message id */
152 	ph->message_id = id;
153 	ph->pipe_handle = pn->pipe_handle;
154 	ph->data[0] = code;
155 	return pn_skb_send(sk, skb, NULL);
156 }
157 
158 static int pipe_handler_send_created_ind(struct sock *sk)
159 {
160 	struct pep_sock *pn = pep_sk(sk);
161 	u8 data[4] = {
162 		PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
163 		pn->tx_fc, pn->rx_fc,
164 	};
165 
166 	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
167 				data, 4, GFP_ATOMIC);
168 }
169 
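/*
 * Reply to an incoming PNS_PEP_CONNECT_REQ.  The 20-byte payload below is
 * three padding bytes, a sub-block count of 2, and then two sub-blocks
 * (PN_PIPE_SB_REQUIRED_FC_TX and PN_PIPE_SB_PREFERRED_FC_RX), each listing
 * the three flow control algorithms supported by this end point.
 */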
170 static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
171 {
172 	static const u8 data[20] = {
173 		PAD, PAD, PAD, 2 /* sub-blocks */,
174 		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
175 			PN_MULTI_CREDIT_FLOW_CONTROL,
176 			PN_ONE_CREDIT_FLOW_CONTROL,
177 			PN_LEGACY_FLOW_CONTROL,
178 			PAD,
179 		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
180 			PN_MULTI_CREDIT_FLOW_CONTROL,
181 			PN_ONE_CREDIT_FLOW_CONTROL,
182 			PN_LEGACY_FLOW_CONTROL,
183 			PAD,
184 	};
185 
186 	might_sleep();
187 	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
188 				GFP_KERNEL);
189 }
190 
191 static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
192 				gfp_t priority)
193 {
194 	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
195 	WARN_ON(code == PN_PIPE_NO_ERROR);
196 	return pep_reply(sk, skb, code, data, sizeof(data), priority);
197 }
198 
199 /* Control requests are not sent by the pipe service and have a specific
200  * message format. */
201 static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
202 				gfp_t priority)
203 {
204 	const struct pnpipehdr *oph = pnp_hdr(oskb);
205 	struct sk_buff *skb;
206 	struct pnpipehdr *ph;
207 	struct sockaddr_pn dst;
208 	u8 data[4] = {
209 		oph->data[0], /* PEP type */
210 		code, /* error code, at an unusual offset */
211 		PAD, PAD,
212 	};
213 
214 	skb = pep_alloc_skb(sk, data, 4, priority);
215 	if (!skb)
216 		return -ENOMEM;
217 
218 	ph = pnp_hdr(skb);
219 	ph->utid = oph->utid;
220 	ph->message_id = PNS_PEP_CTRL_RESP;
221 	ph->pipe_handle = oph->pipe_handle;
222 	ph->data[0] = oph->data[1]; /* CTRL id */
223 
224 	pn_skb_get_src_sockaddr(oskb, &dst);
225 	return pn_skb_send(sk, skb, &dst);
226 }
227 
228 static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
229 {
230 	u8 data[4] = { type, PAD, PAD, status };
231 
232 	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
233 				data, 4, priority);
234 }
235 
236 /* Send our RX flow control information to the sender.
237  * Socket must be locked. */
238 static void pipe_grant_credits(struct sock *sk, gfp_t priority)
239 {
240 	struct pep_sock *pn = pep_sk(sk);
241 
242 	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
243 
244 	switch (pn->rx_fc) {
245 	case PN_LEGACY_FLOW_CONTROL: /* TODO */
246 		break;
247 	case PN_ONE_CREDIT_FLOW_CONTROL:
248 		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
249 					PEP_IND_READY, priority) == 0)
250 			pn->rx_credits = 1;
251 		break;
252 	case PN_MULTI_CREDIT_FLOW_CONTROL:
253 		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
254 			break;
255 		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
256 					CREDITS_MAX - pn->rx_credits,
257 					priority) == 0)
258 			pn->rx_credits = CREDITS_MAX;
259 		break;
260 	}
261 }
262 
263 static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
264 {
265 	struct pep_sock *pn = pep_sk(sk);
266 	struct pnpipehdr *hdr;
267 	int wake = 0;
268 
269 	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
270 		return -EINVAL;
271 
272 	hdr = pnp_hdr(skb);
273 	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
274 		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP type: %u\n",
275 				(unsigned)hdr->data[0]);
276 		return -EOPNOTSUPP;
277 	}
278 
279 	switch (hdr->data[1]) {
280 	case PN_PEP_IND_FLOW_CONTROL:
281 		switch (pn->tx_fc) {
282 		case PN_LEGACY_FLOW_CONTROL:
283 			switch (hdr->data[4]) {
284 			case PEP_IND_BUSY:
285 				atomic_set(&pn->tx_credits, 0);
286 				break;
287 			case PEP_IND_READY:
288 				atomic_set(&pn->tx_credits, wake = 1);
289 				break;
290 			}
291 			break;
292 		case PN_ONE_CREDIT_FLOW_CONTROL:
293 			if (hdr->data[4] == PEP_IND_READY)
294 				atomic_set(&pn->tx_credits, wake = 1);
295 			break;
296 		}
297 		break;
298 
299 	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
300 		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
301 			break;
302 		atomic_add(wake = hdr->data[4], &pn->tx_credits);
303 		break;
304 
305 	default:
306 		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP indication: %u\n",
307 				(unsigned)hdr->data[1]);
308 		return -EOPNOTSUPP;
309 	}
310 	if (wake)
311 		sk->sk_write_space(sk);
312 	return 0;
313 }
314 
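/*
 * PNS_PIPE_CREATED_IND and PNS_PIPE_REDIRECTED_IND may carry a
 * PN_PIPE_SB_NEGOTIATED_FC sub-block with one byte per direction (TX then
 * RX).  Out-of-range values (> 3) are ignored and the legacy flow control
 * default set at the top of the function is kept.
 */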
315 static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
316 {
317 	struct pep_sock *pn = pep_sk(sk);
318 	struct pnpipehdr *hdr = pnp_hdr(skb);
319 	u8 n_sb = hdr->data[0];
320 
321 	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
322 	__skb_pull(skb, sizeof(*hdr));
323 	while (n_sb > 0) {
324 		u8 type, buf[2], len = sizeof(buf);
325 		u8 *data = pep_get_sb(skb, &type, &len, buf);
326 
327 		if (data == NULL)
328 			return -EINVAL;
329 		switch (type) {
330 		case PN_PIPE_SB_NEGOTIATED_FC:
331 			if (len < 2 || (data[0] | data[1]) > 3)
332 				break;
333 			pn->tx_fc = data[0] & 3;
334 			pn->rx_fc = data[1] & 3;
335 			break;
336 		}
337 		n_sb--;
338 	}
339 	return 0;
340 }
341 
342 /* Queue an skb to a connected sock.
343  * Socket lock must be held. */
344 static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
345 {
346 	struct pep_sock *pn = pep_sk(sk);
347 	struct pnpipehdr *hdr = pnp_hdr(skb);
348 	struct sk_buff_head *queue;
349 	int err = 0;
350 
351 	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
352 
353 	switch (hdr->message_id) {
354 	case PNS_PEP_CONNECT_REQ:
355 		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
356 		break;
357 
358 	case PNS_PEP_DISCONNECT_REQ:
359 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
360 		sk->sk_state = TCP_CLOSE_WAIT;
361 		if (!sock_flag(sk, SOCK_DEAD))
362 			sk->sk_state_change(sk);
363 		break;
364 
365 	case PNS_PEP_ENABLE_REQ:
366 		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
367 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
368 		break;
369 
370 	case PNS_PEP_RESET_REQ:
371 		switch (hdr->state_after_reset) {
372 		case PN_PIPE_DISABLE:
373 			pn->init_enable = 0;
374 			break;
375 		case PN_PIPE_ENABLE:
376 			pn->init_enable = 1;
377 			break;
378 		default: /* not allowed to send an error here!? */
379 			err = -EINVAL;
380 			goto out;
381 		}
382 		/* fall through */
383 	case PNS_PEP_DISABLE_REQ:
384 		atomic_set(&pn->tx_credits, 0);
385 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
386 		break;
387 
388 	case PNS_PEP_CTRL_REQ:
389 		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
390 			atomic_inc(&sk->sk_drops);
391 			break;
392 		}
393 		__skb_pull(skb, 4);
394 		queue = &pn->ctrlreq_queue;
395 		goto queue;
396 
397 	case PNS_PIPE_ALIGNED_DATA:
398 		__skb_pull(skb, 1);
399 		/* fall through */
400 	case PNS_PIPE_DATA:
401 		__skb_pull(skb, 3); /* Pipe data header */
402 		if (!pn_flow_safe(pn->rx_fc)) {
403 			err = sock_queue_rcv_skb(sk, skb);
404 			if (!err)
405 				return NET_RX_SUCCESS;
406 			err = -ENOBUFS;
407 			break;
408 		}
409 
410 		if (pn->rx_credits == 0) {
411 			atomic_inc(&sk->sk_drops);
412 			err = -ENOBUFS;
413 			break;
414 		}
415 		pn->rx_credits--;
416 		queue = &sk->sk_receive_queue;
417 		goto queue;
418 
419 	case PNS_PEP_STATUS_IND:
420 		pipe_rcv_status(sk, skb);
421 		break;
422 
423 	case PNS_PIPE_REDIRECTED_IND:
424 		err = pipe_rcv_created(sk, skb);
425 		break;
426 
427 	case PNS_PIPE_CREATED_IND:
428 		err = pipe_rcv_created(sk, skb);
429 		if (err)
430 			break;
431 		/* fall through */
432 	case PNS_PIPE_RESET_IND:
433 		if (!pn->init_enable)
434 			break;
435 		/* fall through */
436 	case PNS_PIPE_ENABLED_IND:
437 		if (!pn_flow_safe(pn->tx_fc)) {
438 			atomic_set(&pn->tx_credits, 1);
439 			sk->sk_write_space(sk);
440 		}
441 		if (sk->sk_state == TCP_ESTABLISHED)
442 			break; /* Nothing to do */
443 		sk->sk_state = TCP_ESTABLISHED;
444 		pipe_grant_credits(sk, GFP_ATOMIC);
445 		break;
446 
447 	case PNS_PIPE_DISABLED_IND:
448 		sk->sk_state = TCP_SYN_RECV;
449 		pn->rx_credits = 0;
450 		break;
451 
452 	default:
453 		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP message: %u\n",
454 				hdr->message_id);
455 		err = -EINVAL;
456 	}
457 out:
458 	kfree_skb(skb);
459 	return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
460 
461 queue:
462 	skb->dev = NULL;
463 	skb_set_owner_r(skb, sk);
464 	err = skb->len;
465 	skb_queue_tail(queue, skb);
466 	if (!sock_flag(sk, SOCK_DEAD))
467 		sk->sk_data_ready(sk, err);
468 	return NET_RX_SUCCESS;
469 }
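
/*
 * The indications handled above drive the sk_state values documented at
 * the top of this file: PNS_PIPE_ENABLED_IND (and CREATED_IND/RESET_IND
 * when the pipe was created in the enabled state) switch the pipe to
 * TCP_ESTABLISHED, PNS_PIPE_DISABLED_IND moves it back to TCP_SYN_RECV,
 * and PNS_PEP_DISCONNECT_REQ leaves it in TCP_CLOSE_WAIT.
 */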
470 
471 /* Destroy connected sock. */
472 static void pipe_destruct(struct sock *sk)
473 {
474 	struct pep_sock *pn = pep_sk(sk);
475 
476 	skb_queue_purge(&sk->sk_receive_queue);
477 	skb_queue_purge(&pn->ctrlreq_queue);
478 }
479 
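/* Pick the numerically highest flow control algorithm advertised by the
 * peer that is still within the known range; fall back to no flow control. */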
480 static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n)
481 {
482 	unsigned i;
483 	u8 final_fc = PN_NO_FLOW_CONTROL;
484 
485 	for (i = 0; i < n; i++) {
486 		u8 fc = fcs[i];
487 
488 		if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
489 			final_fc = fc;
490 	}
491 	return final_fc;
492 }
493 
494 static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
495 {
496 	struct pep_sock *pn = pep_sk(sk);
497 	struct pnpipehdr *hdr;
498 	u8 n_sb;
499 
500 	if (!pskb_pull(skb, sizeof(*hdr) + 4))
501 		return -EINVAL;
502 
503 	hdr = pnp_hdr(skb);
504 	if (hdr->error_code != PN_PIPE_NO_ERROR)
505 		return -ECONNREFUSED;
506 
507 	/* Parse sub-blocks */
508 	n_sb = hdr->data[4];
509 	while (n_sb > 0) {
510 		u8 type, buf[6], len = sizeof(buf);
511 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
512 
513 		if (data == NULL)
514 			return -EINVAL;
515 
516 		switch (type) {
517 		case PN_PIPE_SB_REQUIRED_FC_TX:
518 			if (len < 2 || len < data[0])
519 				break;
520 			pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
521 			break;
522 
523 		case PN_PIPE_SB_PREFERRED_FC_RX:
524 			if (len < 2 || len < data[0])
525 				break;
526 			pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
527 			break;
528 
529 		}
530 		n_sb--;
531 	}
532 
533 	return pipe_handler_send_created_ind(sk);
534 }
535 
536 /* Queue an skb to an actively connected sock.
537  * Socket lock must be held. */
538 static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
539 {
540 	struct pep_sock *pn = pep_sk(sk);
541 	struct pnpipehdr *hdr = pnp_hdr(skb);
542 	int err = NET_RX_SUCCESS;
543 
544 	switch (hdr->message_id) {
545 	case PNS_PIPE_ALIGNED_DATA:
546 		__skb_pull(skb, 1);
547 		/* fall through */
548 	case PNS_PIPE_DATA:
549 		__skb_pull(skb, 3); /* Pipe data header */
550 		if (!pn_flow_safe(pn->rx_fc)) {
551 			err = sock_queue_rcv_skb(sk, skb);
552 			if (!err)
553 				return NET_RX_SUCCESS;
554 			err = NET_RX_DROP;
555 			break;
556 		}
557 
558 		if (pn->rx_credits == 0) {
559 			atomic_inc(&sk->sk_drops);
560 			err = NET_RX_DROP;
561 			break;
562 		}
563 		pn->rx_credits--;
564 		skb->dev = NULL;
565 		skb_set_owner_r(skb, sk);
566 		err = skb->len;
567 		skb_queue_tail(&sk->sk_receive_queue, skb);
568 		if (!sock_flag(sk, SOCK_DEAD))
569 			sk->sk_data_ready(sk, err);
570 		return NET_RX_SUCCESS;
571 
572 	case PNS_PEP_CONNECT_RESP:
573 		if (sk->sk_state != TCP_SYN_SENT)
574 			break;
575 		if (!sock_flag(sk, SOCK_DEAD))
576 			sk->sk_state_change(sk);
577 		if (pep_connresp_rcv(sk, skb)) {
578 			sk->sk_state = TCP_CLOSE_WAIT;
579 			break;
580 		}
581 
582 		sk->sk_state = TCP_ESTABLISHED;
583 		if (!pn_flow_safe(pn->tx_fc)) {
584 			atomic_set(&pn->tx_credits, 1);
585 			sk->sk_write_space(sk);
586 		}
587 		pipe_grant_credits(sk, GFP_ATOMIC);
588 		break;
589 
590 	case PNS_PEP_DISCONNECT_RESP:
591 		/* sock should already be dead, nothing to do */
592 		break;
593 
594 	case PNS_PEP_STATUS_IND:
595 		pipe_rcv_status(sk, skb);
596 		break;
597 	}
598 	kfree_skb(skb);
599 	return err;
600 }
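
/*
 * There are two connected-pipe receive paths: pipe_do_rcv() serves pipes
 * accepted from a listening socket, while pipe_handler_do_rcv() above
 * serves pipes set up actively with connect().  pep_do_rcv() below routes
 * each incoming packet to one of them depending on the socket state.
 */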
601 
602 /* Listening sock must be locked */
603 static struct sock *pep_find_pipe(const struct hlist_head *hlist,
604 					const struct sockaddr_pn *dst,
605 					u8 pipe_handle)
606 {
607 	struct hlist_node *node;
608 	struct sock *sknode;
609 	u16 dobj = pn_sockaddr_get_object(dst);
610 
611 	sk_for_each(sknode, node, hlist) {
612 		struct pep_sock *pnnode = pep_sk(sknode);
613 
614 		/* Ports match, but addresses might not: */
615 		if (pnnode->pn_sk.sobject != dobj)
616 			continue;
617 		if (pnnode->pipe_handle != pipe_handle)
618 			continue;
619 		if (sknode->sk_state == TCP_CLOSE_WAIT)
620 			continue;
621 
622 		sock_hold(sknode);
623 		return sknode;
624 	}
625 	return NULL;
626 }
627 
628 /*
629  * Deliver an skb to a listening sock.
630  * Socket lock must be held.
631  * We then queue the skb to the right connected sock (if any).
632  */
633 static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
634 {
635 	struct pep_sock *pn = pep_sk(sk);
636 	struct sock *sknode;
637 	struct pnpipehdr *hdr;
638 	struct sockaddr_pn dst;
639 	u8 pipe_handle;
640 
641 	if (!pskb_may_pull(skb, sizeof(*hdr)))
642 		goto drop;
643 
644 	hdr = pnp_hdr(skb);
645 	pipe_handle = hdr->pipe_handle;
646 	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
647 		goto drop;
648 
649 	pn_skb_get_dst_sockaddr(skb, &dst);
650 
651 	/* Look for an existing pipe handle */
652 	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
653 	if (sknode)
654 		return sk_receive_skb(sknode, skb, 1);
655 
656 	switch (hdr->message_id) {
657 	case PNS_PEP_CONNECT_REQ:
658 		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
659 			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
660 					GFP_ATOMIC);
661 			break;
662 		}
663 		skb_queue_head(&sk->sk_receive_queue, skb);
664 		sk_acceptq_added(sk);
665 		if (!sock_flag(sk, SOCK_DEAD))
666 			sk->sk_data_ready(sk, 0);
667 		return NET_RX_SUCCESS;
668 
669 	case PNS_PEP_DISCONNECT_REQ:
670 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
671 		break;
672 
673 	case PNS_PEP_CTRL_REQ:
674 		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
675 		break;
676 
677 	case PNS_PEP_RESET_REQ:
678 	case PNS_PEP_ENABLE_REQ:
679 	case PNS_PEP_DISABLE_REQ:
680 		/* invalid handle is not even allowed here! */
681 		break;
682 
683 	default:
684 		if ((1 << sk->sk_state)
685 				& ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
686 			/* actively connected socket */
687 			return pipe_handler_do_rcv(sk, skb);
688 	}
689 drop:
690 	kfree_skb(skb);
691 	return NET_RX_SUCCESS;
692 }
693 
694 static int pipe_do_remove(struct sock *sk)
695 {
696 	struct pep_sock *pn = pep_sk(sk);
697 	struct pnpipehdr *ph;
698 	struct sk_buff *skb;
699 
700 	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
701 	if (!skb)
702 		return -ENOMEM;
703 
704 	ph = pnp_hdr(skb);
705 	ph->utid = 0;
706 	ph->message_id = PNS_PIPE_REMOVE_REQ;
707 	ph->pipe_handle = pn->pipe_handle;
708 	ph->data[0] = PAD;
709 	return pn_skb_send(sk, skb, NULL);
710 }
711 
712 /* associated socket ceases to exist */
713 static void pep_sock_close(struct sock *sk, long timeout)
714 {
715 	struct pep_sock *pn = pep_sk(sk);
716 	int ifindex = 0;
717 
718 	sock_hold(sk); /* keep a reference after sk_common_release() */
719 	sk_common_release(sk);
720 
721 	lock_sock(sk);
722 	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
723 		if (sk->sk_backlog_rcv == pipe_do_rcv)
724 			/* Forcefully remove dangling Phonet pipe */
725 			pipe_do_remove(sk);
726 		else
727 			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
728 						NULL, 0);
729 	}
730 	sk->sk_state = TCP_CLOSE;
731 
732 	ifindex = pn->ifindex;
733 	pn->ifindex = 0;
734 	release_sock(sk);
735 
736 	if (ifindex)
737 		gprs_detach(sk);
738 	sock_put(sk);
739 }
740 
741 static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
742 {
743 	struct pep_sock *pn = pep_sk(sk), *newpn;
744 	struct sock *newsk = NULL;
745 	struct sk_buff *skb;
746 	struct pnpipehdr *hdr;
747 	struct sockaddr_pn dst, src;
748 	int err;
749 	u16 peer_type;
750 	u8 pipe_handle, enabled, n_sb;
751 	u8 aligned = 0;
752 
753 	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
754 	if (!skb)
755 		return NULL;
756 
757 	lock_sock(sk);
758 	if (sk->sk_state != TCP_LISTEN) {
759 		err = -EINVAL;
760 		goto drop;
761 	}
762 	sk_acceptq_removed(sk);
763 
764 	err = -EPROTO;
765 	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
766 		goto drop;
767 
768 	hdr = pnp_hdr(skb);
769 	pipe_handle = hdr->pipe_handle;
770 	switch (hdr->state_after_connect) {
771 	case PN_PIPE_DISABLE:
772 		enabled = 0;
773 		break;
774 	case PN_PIPE_ENABLE:
775 		enabled = 1;
776 		break;
777 	default:
778 		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
779 				GFP_KERNEL);
780 		goto drop;
781 	}
782 	peer_type = hdr->other_pep_type << 8;
783 
784 	/* Parse sub-blocks (options) */
785 	n_sb = hdr->data[4];
786 	while (n_sb > 0) {
787 		u8 type, buf[1], len = sizeof(buf);
788 		const u8 *data = pep_get_sb(skb, &type, &len, buf);
789 
790 		if (data == NULL)
791 			goto drop;
792 		switch (type) {
793 		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
794 			if (len < 1)
795 				goto drop;
796 			peer_type = (peer_type & 0xff00) | data[0];
797 			break;
798 		case PN_PIPE_SB_ALIGNED_DATA:
799 			aligned = data[0] != 0;
800 			break;
801 		}
802 		n_sb--;
803 	}
804 
805 	/* Check for duplicate pipe handle */
	pn_skb_get_dst_sockaddr(skb, &dst);
806 	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
807 	if (unlikely(newsk)) {
808 		__sock_put(newsk);
809 		newsk = NULL;
810 		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
811 		goto drop;
812 	}
813 
814 	/* Create a new to-be-accepted sock */
815 	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
816 	if (!newsk) {
817 		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
818 		err = -ENOBUFS;
819 		goto drop;
820 	}
821 
822 	sock_init_data(NULL, newsk);
823 	newsk->sk_state = TCP_SYN_RECV;
824 	newsk->sk_backlog_rcv = pipe_do_rcv;
825 	newsk->sk_protocol = sk->sk_protocol;
826 	newsk->sk_destruct = pipe_destruct;
827 
828 	newpn = pep_sk(newsk);
830 	pn_skb_get_src_sockaddr(skb, &src);
831 	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
832 	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
833 	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
834 	sock_hold(sk);
835 	newpn->listener = sk;
836 	skb_queue_head_init(&newpn->ctrlreq_queue);
837 	newpn->pipe_handle = pipe_handle;
838 	atomic_set(&newpn->tx_credits, 0);
839 	newpn->ifindex = 0;
840 	newpn->peer_type = peer_type;
841 	newpn->rx_credits = 0;
842 	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
843 	newpn->init_enable = enabled;
844 	newpn->aligned = aligned;
845 
846 	err = pep_accept_conn(newsk, skb);
847 	if (err) {
848 		sock_put(newsk);
849 		newsk = NULL;
850 		goto drop;
851 	}
852 	sk_add_node(newsk, &pn->hlist);
853 drop:
854 	release_sock(sk);
855 	kfree_skb(skb);
856 	*errp = err;
857 	return newsk;
858 }
859 
860 static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
861 {
862 	struct pep_sock *pn = pep_sk(sk);
863 	int err;
864 	u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
865 
866 	pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
867 	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
868 					PN_PIPE_ENABLE, data, 4);
869 	if (err) {
870 		pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
871 		return err;
872 	}
873 	sk->sk_state = TCP_SYN_SENT;
874 	return 0;
875 }
876 
877 static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
878 {
879 	struct pep_sock *pn = pep_sk(sk);
880 	int answ;
881 
882 	switch (cmd) {
883 	case SIOCINQ:
884 		if (sk->sk_state == TCP_LISTEN)
885 			return -EINVAL;
886 
887 		lock_sock(sk);
888 		if (sock_flag(sk, SOCK_URGINLINE) &&
889 		    !skb_queue_empty(&pn->ctrlreq_queue))
890 			answ = skb_peek(&pn->ctrlreq_queue)->len;
891 		else if (!skb_queue_empty(&sk->sk_receive_queue))
892 			answ = skb_peek(&sk->sk_receive_queue)->len;
893 		else
894 			answ = 0;
895 		release_sock(sk);
896 		return put_user(answ, (int __user *)arg);
897 	}
898 
899 	return -ENOIOCTLCMD;
900 }
901 
902 static int pep_init(struct sock *sk)
903 {
904 	struct pep_sock *pn = pep_sk(sk);
905 
906 	sk->sk_destruct = pipe_destruct;
907 	INIT_HLIST_HEAD(&pn->hlist);
908 	pn->listener = NULL;
909 	skb_queue_head_init(&pn->ctrlreq_queue);
910 	atomic_set(&pn->tx_credits, 0);
911 	pn->ifindex = 0;
912 	pn->peer_type = 0;
913 	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
914 	pn->rx_credits = 0;
915 	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
916 	pn->init_enable = 1;
917 	pn->aligned = 0;
918 	return 0;
919 }
920 
921 static int pep_setsockopt(struct sock *sk, int level, int optname,
922 				char __user *optval, unsigned int optlen)
923 {
924 	struct pep_sock *pn = pep_sk(sk);
925 	int val = 0, err = 0;
926 
927 	if (level != SOL_PNPIPE)
928 		return -ENOPROTOOPT;
929 	if (optlen >= sizeof(int)) {
930 		if (get_user(val, (int __user *) optval))
931 			return -EFAULT;
932 	}
933 
934 	lock_sock(sk);
935 	switch (optname) {
936 	case PNPIPE_ENCAP:
937 		if (val && val != PNPIPE_ENCAP_IP) {
938 			err = -EINVAL;
939 			break;
940 		}
941 		if (!pn->ifindex == !val)
942 			break; /* Nothing to do! */
943 		if (!capable(CAP_NET_ADMIN)) {
944 			err = -EPERM;
945 			break;
946 		}
947 		if (val) {
948 			release_sock(sk);
949 			err = gprs_attach(sk);
950 			if (err > 0) {
951 				pn->ifindex = err;
952 				err = 0;
953 			}
954 		} else {
955 			pn->ifindex = 0;
956 			release_sock(sk);
957 			gprs_detach(sk);
958 			err = 0;
959 		}
960 		goto out_norel;
961 
962 	default:
963 		err = -ENOPROTOOPT;
964 	}
965 	release_sock(sk);
966 
967 out_norel:
968 	return err;
969 }
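
/*
 * Note on the PNPIPE_ENCAP case above: the socket lock is released inside
 * that branch before gprs_attach()/gprs_detach() is called, and control
 * then jumps to out_norel so the release_sock() shared by the other cases
 * is skipped.
 */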
970 
971 static int pep_getsockopt(struct sock *sk, int level, int optname,
972 				char __user *optval, int __user *optlen)
973 {
974 	struct pep_sock *pn = pep_sk(sk);
975 	int len, val;
976 
977 	if (level != SOL_PNPIPE)
978 		return -ENOPROTOOPT;
979 	if (get_user(len, optlen))
980 		return -EFAULT;
981 
982 	switch (optname) {
983 	case PNPIPE_ENCAP:
984 		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
985 		break;
986 
987 	case PNPIPE_IFINDEX:
988 		val = pn->ifindex;
989 		break;
990 
991 	case PNPIPE_HANDLE:
992 		val = pn->pipe_handle;
993 		if (val == PN_PIPE_INVALID_HANDLE)
994 			return -EINVAL;
995 		break;
996 
997 	default:
998 		return -ENOPROTOOPT;
999 	}
1000 
1001 	len = min_t(unsigned int, sizeof(int), len);
1002 	if (put_user(len, optlen))
1003 		return -EFAULT;
1004 	if (put_user(val, (int __user *) optval))
1005 		return -EFAULT;
1006 	return 0;
1007 }
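
/*
 * Illustrative userspace sketch of the options above (not part of this
 * file, and only a sketch): assuming a Phonet pipe socket created with
 * socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE) and already set up,
 * IP encapsulation could be toggled and the resulting interface queried
 * roughly like this:
 *
 *	int on = PNPIPE_ENCAP_IP, ifindex;
 *	socklen_t len = sizeof(ifindex);
 *
 *	setsockopt(fd, SOL_PNPIPE, PNPIPE_ENCAP, &on, sizeof(on));
 *	getsockopt(fd, SOL_PNPIPE, PNPIPE_IFINDEX, &ifindex, &len);
 *
 * PNPIPE_ENCAP_IP requires CAP_NET_ADMIN; on success gprs_attach() creates
 * a GPRS network interface whose index is reported via PNPIPE_IFINDEX.
 */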
1008 
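/*
 * Transmit one pipe data message.  Under a credit-based flow control
 * scheme, one TX credit is consumed atomically before sending and handed
 * back if pn_skb_send() fails; with no credits left the skb is dropped
 * with -ENOBUFS and the caller must wait for the peer to grant more.
 */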
1009 static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
1010 {
1011 	struct pep_sock *pn = pep_sk(sk);
1012 	struct pnpipehdr *ph;
1013 	int err;
1014 
1015 	if (pn_flow_safe(pn->tx_fc) &&
1016 	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
1017 		kfree_skb(skb);
1018 		return -ENOBUFS;
1019 	}
1020 
1021 	skb_push(skb, 3 + pn->aligned);
1022 	skb_reset_transport_header(skb);
1023 	ph = pnp_hdr(skb);
1024 	ph->utid = 0;
1025 	if (pn->aligned) {
1026 		ph->message_id = PNS_PIPE_ALIGNED_DATA;
1027 		ph->data[0] = 0; /* padding */
1028 	} else
1029 		ph->message_id = PNS_PIPE_DATA;
1030 	ph->pipe_handle = pn->pipe_handle;
1031 	err = pn_skb_send(sk, skb, NULL);
1032 
1033 	if (err && pn_flow_safe(pn->tx_fc))
1034 		atomic_inc(&pn->tx_credits);
1035 	return err;
1037 }
1038 
1039 static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
1040 			struct msghdr *msg, size_t len)
1041 {
1042 	struct pep_sock *pn = pep_sk(sk);
1043 	struct sk_buff *skb;
1044 	long timeo;
1045 	int flags = msg->msg_flags;
1046 	int err, done;
1047 
1048 	if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
1049 				MSG_CMSG_COMPAT)) ||
1050 			!(msg->msg_flags & MSG_EOR))
1051 		return -EOPNOTSUPP;
1052 
1053 	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
1054 					flags & MSG_DONTWAIT, &err);
1055 	if (!skb)
1056 		return err;
1057 
1058 	skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
1059 	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1060 	if (err < 0)
1061 		goto outfree;
1062 
1063 	lock_sock(sk);
1064 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1065 	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
1066 		err = -ENOTCONN;
1067 		goto out;
1068 	}
1069 	if (sk->sk_state != TCP_ESTABLISHED) {
1070 		/* Wait until the pipe gets to enabled state */
1071 disabled:
1072 		err = sk_stream_wait_connect(sk, &timeo);
1073 		if (err)
1074 			goto out;
1075 
1076 		if (sk->sk_state == TCP_CLOSE_WAIT) {
1077 			err = -ECONNRESET;
1078 			goto out;
1079 		}
1080 	}
1081 	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
1082 
1083 	/* Wait until flow control allows TX */
1084 	done = atomic_read(&pn->tx_credits);
1085 	while (!done) {
1086 		DEFINE_WAIT(wait);
1087 
1088 		if (!timeo) {
1089 			err = -EAGAIN;
1090 			goto out;
1091 		}
1092 		if (signal_pending(current)) {
1093 			err = sock_intr_errno(timeo);
1094 			goto out;
1095 		}
1096 
1097 		prepare_to_wait(sk_sleep(sk), &wait,
1098 				TASK_INTERRUPTIBLE);
1099 		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
1100 		finish_wait(sk_sleep(sk), &wait);
1101 
1102 		if (sk->sk_state != TCP_ESTABLISHED)
1103 			goto disabled;
1104 	}
1105 
1106 	err = pipe_skb_send(sk, skb);
1107 	if (err >= 0)
1108 		err = len; /* success! */
1109 	skb = NULL;
1110 out:
1111 	release_sock(sk);
1112 outfree:
1113 	kfree_skb(skb);
1114 	return err;
1115 }
1116 
1117 int pep_writeable(struct sock *sk)
1118 {
1119 	struct pep_sock *pn = pep_sk(sk);
1120 
1121 	return atomic_read(&pn->tx_credits);
1122 }
1123 
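/*
 * pep_write() is used by the GPRS encapsulation code to push an IP packet
 * into the pipe.  For unaligned pipes, a fresh zero-length skb with enough
 * headroom is allocated and the payload is chained onto its frag_list, so
 * the pipe data header can be pushed without copying or reallocating the
 * payload; any fragment list the payload already carried is flattened
 * first to avoid nesting.
 */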
1124 int pep_write(struct sock *sk, struct sk_buff *skb)
1125 {
1126 	struct sk_buff *rskb, *fs;
1127 	int flen = 0;
1128 
1129 	if (pep_sk(sk)->aligned)
1130 		return pipe_skb_send(sk, skb);
1131 
1132 	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
1133 	if (!rskb) {
1134 		kfree_skb(skb);
1135 		return -ENOMEM;
1136 	}
1137 	skb_shinfo(rskb)->frag_list = skb;
1138 	rskb->len += skb->len;
1139 	rskb->data_len += rskb->len;
1140 	rskb->truesize += rskb->len;
1141 
1142 	/* Avoid nested fragments */
1143 	skb_walk_frags(skb, fs)
1144 		flen += fs->len;
1145 	skb->next = skb_shinfo(skb)->frag_list;
1146 	skb_frag_list_init(skb);
1147 	skb->len -= flen;
1148 	skb->data_len -= flen;
1149 	skb->truesize -= flen;
1150 
1151 	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
1152 	return pipe_skb_send(sk, rskb);
1153 }
1154 
1155 struct sk_buff *pep_read(struct sock *sk)
1156 {
1157 	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
1158 
1159 	if (sk->sk_state == TCP_ESTABLISHED)
1160 		pipe_grant_credits(sk, GFP_ATOMIC);
1161 	return skb;
1162 }
1163 
1164 static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
1165 			struct msghdr *msg, size_t len, int noblock,
1166 			int flags, int *addr_len)
1167 {
1168 	struct sk_buff *skb;
1169 	int err;
1170 
1171 	if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
1172 			MSG_NOSIGNAL|MSG_CMSG_COMPAT))
1173 		return -EOPNOTSUPP;
1174 
1175 	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
1176 		return -ENOTCONN;
1177 
1178 	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
1179 		/* Dequeue and acknowledge control request */
1180 		struct pep_sock *pn = pep_sk(sk);
1181 
1182 		if (flags & MSG_PEEK)
1183 			return -EOPNOTSUPP;
1184 		skb = skb_dequeue(&pn->ctrlreq_queue);
1185 		if (skb) {
1186 			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
1187 						GFP_KERNEL);
1188 			msg->msg_flags |= MSG_OOB;
1189 			goto copy;
1190 		}
1191 		if (flags & MSG_OOB)
1192 			return -EINVAL;
1193 	}
1194 
1195 	skb = skb_recv_datagram(sk, flags, noblock, &err);
1196 	lock_sock(sk);
1197 	if (skb == NULL) {
1198 		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
1199 			err = -ECONNRESET;
1200 		release_sock(sk);
1201 		return err;
1202 	}
1203 
1204 	if (sk->sk_state == TCP_ESTABLISHED)
1205 		pipe_grant_credits(sk, GFP_KERNEL);
1206 	release_sock(sk);
1207 copy:
1208 	msg->msg_flags |= MSG_EOR;
1209 	if (skb->len > len)
1210 		msg->msg_flags |= MSG_TRUNC;
1211 	else
1212 		len = skb->len;
1213 
1214 	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
1215 	if (!err)
1216 		err = (flags & MSG_TRUNC) ? skb->len : len;
1217 
1218 	skb_free_datagram(sk, skb);
1219 	return err;
1220 }
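
/*
 * Illustrative userspace sketch (not part of this file): control requests
 * queued on ctrlreq_queue are read out of band, e.g.
 *
 *	char buf[64];
 *	ssize_t n = recv(fd, buf, sizeof(buf), MSG_OOB);
 *
 * which also makes the kernel acknowledge the request with
 * PN_PIPE_NO_ERROR through pep_ctrlreq_error() as seen above.
 */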
1221 
1222 static void pep_sock_unhash(struct sock *sk)
1223 {
1224 	struct pep_sock *pn = pep_sk(sk);
1225 	struct sock *skparent = NULL;
1226 
1227 	lock_sock(sk);
1228 
1229 	if (pn->listener != NULL) {
1230 		skparent = pn->listener;
1231 		pn->listener = NULL;
1232 		release_sock(sk);
1233 
1234 		pn = pep_sk(skparent);
1235 		lock_sock(skparent);
1236 		sk_del_node_init(sk);
1237 		sk = skparent;
1238 	}
1239 
1240 	/* Unhash a listening sock only when it is closed
1241 	 * and all of its active connected pipes are closed. */
1242 	if (hlist_empty(&pn->hlist))
1243 		pn_sock_unhash(&pn->pn_sk.sk);
1244 	release_sock(sk);
1245 
1246 	if (skparent)
1247 		sock_put(skparent);
1248 }
1249 
1250 static struct proto pep_proto = {
1251 	.close		= pep_sock_close,
1252 	.accept		= pep_sock_accept,
1253 	.connect	= pep_sock_connect,
1254 	.ioctl		= pep_ioctl,
1255 	.init		= pep_init,
1256 	.setsockopt	= pep_setsockopt,
1257 	.getsockopt	= pep_getsockopt,
1258 	.sendmsg	= pep_sendmsg,
1259 	.recvmsg	= pep_recvmsg,
1260 	.backlog_rcv	= pep_do_rcv,
1261 	.hash		= pn_sock_hash,
1262 	.unhash		= pep_sock_unhash,
1263 	.get_port	= pn_sock_get_port,
1264 	.obj_size	= sizeof(struct pep_sock),
1265 	.owner		= THIS_MODULE,
1266 	.name		= "PNPIPE",
1267 };
1268 
1269 static struct phonet_protocol pep_pn_proto = {
1270 	.ops		= &phonet_stream_ops,
1271 	.prot		= &pep_proto,
1272 	.sock_type	= SOCK_SEQPACKET,
1273 };
1274 
1275 static int __init pep_register(void)
1276 {
1277 	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
1278 }
1279 
1280 static void __exit pep_unregister(void)
1281 {
1282 	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
1283 }
1284 
1285 module_init(pep_register);
1286 module_exit(pep_unregister);
1287 MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
1288 MODULE_DESCRIPTION("Phonet pipe protocol");
1289 MODULE_LICENSE("GPL");
1290 MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);
1291