xref: /linux/net/rxrpc/af_rxrpc.c (revision c145211d1f9e2ef19e7b4c2b943f68366daa97af)
1 /* AF_RXRPC implementation
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/net.h>
14 #include <linux/slab.h>
15 #include <linux/skbuff.h>
16 #include <linux/poll.h>
17 #include <linux/proc_fs.h>
18 #include <linux/key-type.h>
19 #include <net/net_namespace.h>
20 #include <net/sock.h>
21 #include <net/af_rxrpc.h>
22 #include "ar-internal.h"
23 
MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

/* debugging mask, exposed as module parameter "debug" (root-writable) */
unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

/* default accept-queue depth applied to each new socket's max_ack_backlog */
static int sysctl_rxrpc_max_qlen __read_mostly = 10;

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* local epoch for detecting local-end reset; set from wallclock at init */
__be32 rxrpc_epoch;

/* current debugging ID */
atomic_t rxrpc_debug_id;

/* count of skbs currently in use */
atomic_t rxrpc_n_skbs;

/* single-threaded-per-work workqueue used for deferred rxrpc processing */
struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);
50 
/*
 * see if an RxRPC socket is currently writable
 * - writable while the amount of memory committed to queued write data is
 *   still below the socket's send buffer limit
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}
58 
/*
 * wait for write bufferage to become available
 * - installed as sk->sk_write_space in rxrpc_create(); called when write
 *   memory is released
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	/* sk_callback_lock guards the sleep/async wakeup state */
	read_lock(&sk->sk_callback_lock);
	if (rxrpc_writable(sk)) {
		/* wake any task sleeping on the socket's wait queue */
		if (sk_has_sleeper(sk))
			wake_up_interruptible(sk->sk_sleep);
		/* and send async space notification (SIGIO/poll) */
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	read_unlock(&sk->sk_callback_lock);
}
73 
/*
 * validate an RxRPC address
 * - checks overall length, family, transport type and transport address
 * - returns 0 on success or a negative errno
 * - only AF_INET transport addresses are accepted; AF_INET6 is explicitly
 *   rejected
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	/* transport_len must cover at least the family field and must not
	 * overrun the space remaining in the sockaddr the caller supplied */
	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	/* the transport family must match the protocol the socket was
	 * created with */
	if (srx->transport.family != rx->proto)
		return -EAFNOSUPPORT;

	switch (srx->transport.family) {
	case AF_INET:
		_debug("INET: %x @ %pI4",
		       ntohs(srx->transport.sin.sin_port),
		       &srx->transport.sin.sin_addr);
		/* zero any bytes beyond the 8 significant bytes of a
		 * sockaddr_in (family + port + address) — presumably so
		 * whole-struct comparisons elsewhere are reliable; TODO
		 * confirm against the peer lookup code */
		if (srx->transport_len > 8)
			memset((void *)&srx->transport + 8, 0,
			       srx->transport_len - 8);
		break;

	case AF_INET6:
	default:
		return -EAFNOSUPPORT;
	}

	return 0;
}
115 
/*
 * bind a local address to an RxRPC socket
 * - only permitted while the socket is in the RXRPC_UNCONNECTED state
 * - a non-zero srx_service registers the socket as the sole listener for
 *   that service on the local endpoint (-EADDRINUSE if already claimed)
 * - returns 0 on success or a negative errno
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) saddr;
	struct sock *sk = sock->sk;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk), *prx;
	__be16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;

	lock_sock(&rx->sk);

	/* rebinding a bound/connected socket is not allowed */
	if (rx->sk.sk_state != RXRPC_UNCONNECTED) {
		ret = -EINVAL;
		goto error_unlock;
	}

	memcpy(&rx->srx, srx, sizeof(rx->srx));

	/* find a local transport endpoint if we don't have one already */
	local = rxrpc_lookup_local(&rx->srx);
	if (IS_ERR(local)) {
		ret = PTR_ERR(local);
		goto error_unlock;
	}

	rx->local = local;
	if (srx->srx_service) {
		/* server bind: claim the service ID on this endpoint */
		service_id = htons(srx->srx_service);
		write_lock_bh(&local->services_lock);
		list_for_each_entry(prx, &local->services, listen_link) {
			if (prx->service_id == service_id)
				goto service_in_use;
		}

		rx->service_id = service_id;
		list_add_tail(&rx->listen_link, &local->services);
		write_unlock_bh(&local->services_lock);

		rx->sk.sk_state = RXRPC_SERVER_BOUND;
	} else {
		/* client bind: no service ID to register */
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	ret = -EADDRINUSE;
	write_unlock_bh(&local->services_lock);
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
181 
/*
 * set the number of pending calls permitted on a listening socket
 * - only valid on a server-bound socket; moves it to the listening state
 * - returns 0 on success or a negative errno
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		/* must bind to a service address first */
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_CLIENT_BOUND:
	case RXRPC_CLIENT_CONNECTED:
	default:
		/* client sockets and other states can't listen */
		ret = -EBUSY;
		break;
	case RXRPC_SERVER_BOUND:
		ASSERT(rx->local != NULL);
		sk->sk_max_ack_backlog = backlog;
		rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		ret = 0;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}
216 
/*
 * find a transport by address
 * - looks up/creates the remote peer record for the given address, then
 *   gets a transport linking the socket's local endpoint to that peer
 * - the caller must hold the socket lock and the socket must be bound
 * - returns the transport (with a usage ref held) or an ERR_PTR
 */
static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
						       struct sockaddr *addr,
						       int addr_len, int flags,
						       gfp_t gfp)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	struct rxrpc_peer *peer;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ASSERT(rx->local != NULL);
	ASSERT(rx->sk.sk_state > RXRPC_UNCONNECTED);

	/* the target address must use the same transport as the socket */
	if (rx->srx.transport_type != srx->transport_type)
		return ERR_PTR(-ESOCKTNOSUPPORT);
	if (rx->srx.transport.family != srx->transport.family)
		return ERR_PTR(-EAFNOSUPPORT);

	/* find a remote transport endpoint from the local one */
	peer = rxrpc_get_peer(srx, gfp);
	if (IS_ERR(peer))
		return ERR_CAST(peer);

	/* find a transport; the transport holds its own peer ref, so drop
	 * ours afterwards */
	trans = rxrpc_get_transport(rx->local, peer, gfp);
	rxrpc_put_peer(peer);
	_leave(" = %p", trans);
	return trans;
}
251 
/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @srx: The address of the peer to contact (defaults to socket setting)
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @gfp: Allocation flags for the lookups and allocations made here
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.  The call to be used is returned.
 *
 * The default socket destination address and security may be overridden by
 * supplying @srx and @key.
 *
 * Returns the new call or an ERR_PTR on failure.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct sockaddr_rxrpc *srx,
					   struct key *key,
					   unsigned long user_call_ID,
					   gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle;
	struct rxrpc_transport *trans;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	__be16 service_id;

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	lock_sock(&rx->sk);

	if (srx) {
		/* explicit destination: resolve a transport for it */
		trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
						sizeof(*srx), 0, gfp);
		if (IS_ERR(trans)) {
			call = ERR_CAST(trans);
			trans = NULL;
			goto out_notrans;
		}
	} else {
		/* no address given: fall back to the socket's transport */
		trans = rx->trans;
		if (!trans) {
			call = ERR_PTR(-ENOTCONN);
			goto out_notrans;
		}
		atomic_inc(&trans->usage);
	}

	service_id = rx->service_id;
	if (srx)
		service_id = htons(srx->srx_service);

	if (!key)
		key = rx->key;
	if (key && !key->payload.data)
		key = NULL; /* a no-security key */

	bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
	if (IS_ERR(bundle)) {
		call = ERR_CAST(bundle);
		goto out;
	}

	/* the call takes its own refs on the transport and bundle */
	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID, true,
				     gfp);
	rxrpc_put_bundle(trans, bundle);
out:
	rxrpc_put_transport(trans);
out_notrans:
	release_sock(&rx->sk);
	_leave(" = %p", call);
	return call;
}

EXPORT_SYMBOL(rxrpc_kernel_begin_call);
326 
/**
 * rxrpc_kernel_end_call - Allow a kernel service to end a call it was using
 * @call: The call to end
 *
 * Allow a kernel service to end a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 * This drops the user-ID tracking on the owning socket and releases the
 * caller's reference on the call.
 */
void rxrpc_kernel_end_call(struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
	rxrpc_remove_user_ID(call->socket, call);
	rxrpc_put_call(call);
}

EXPORT_SYMBOL(rxrpc_kernel_end_call);
342 
/**
 * rxrpc_kernel_intercept_rx_messages - Intercept received RxRPC messages
 * @sock: The socket to intercept received messages on
 * @interceptor: The function to pass the messages to
 *
 * Allow a kernel service to intercept messages heading for the Rx queue on an
 * RxRPC socket.  They get passed to the specified function instead.
 * @interceptor should free the socket buffers it is given.  @interceptor is
 * called with the socket receive queue spinlock held and softirqs disabled -
 * this ensures that the messages will be delivered in the right order.
 */
void rxrpc_kernel_intercept_rx_messages(struct socket *sock,
					rxrpc_interceptor_t interceptor)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	_enter("");
	/* simply record the interceptor; delivery code checks this field */
	rx->interceptor = interceptor;
}

EXPORT_SYMBOL(rxrpc_kernel_intercept_rx_messages);
364 
/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 * - an unbound socket is implicitly bound to a wildcard local address first
 * - returns 0 on success or a negative errno
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr;
	struct sock *sk = sock->sk;
	struct rxrpc_transport *trans;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNCONNECTED:
		/* find a local transport endpoint if we don't have one already */
		ASSERTCMP(rx->local, ==, NULL);
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = srx->transport_type;
		rx->srx.transport_len = sizeof(sa_family_t);
		rx->srx.transport.family = srx->transport.family;
		local = rxrpc_lookup_local(&rx->srx);
		if (IS_ERR(local)) {
			release_sock(&rx->sk);
			return PTR_ERR(local);
		}
		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		/* fall through - now bound, proceed to connect */
	case RXRPC_CLIENT_BOUND:
		break;
	case RXRPC_CLIENT_CONNECTED:
		release_sock(&rx->sk);
		return -EISCONN;
	default:
		release_sock(&rx->sk);
		return -EBUSY; /* server sockets can't connect as well */
	}

	trans = rxrpc_name_to_transport(sock, addr, addr_len, flags,
					GFP_KERNEL);
	if (IS_ERR(trans)) {
		release_sock(&rx->sk);
		_leave(" = %ld", PTR_ERR(trans));
		return PTR_ERR(trans);
	}

	/* record the default destination and service for future sends */
	rx->trans = trans;
	rx->service_id = htons(srx->srx_service);
	rx->sk.sk_state = RXRPC_CLIENT_CONNECTED;

	release_sock(&rx->sk);
	return 0;
}
431 
/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 * - returns bytes sent / 0 from the lower sendmsg, or a negative errno
 */
static int rxrpc_sendmsg(struct kiocb *iocb, struct socket *sock,
			 struct msghdr *m, size_t len)
{
	struct rxrpc_transport *trans;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* validate any explicit destination before taking the socket lock */
	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	trans = NULL;
	lock_sock(&rx->sk);

	if (m->msg_name) {
		/* explicit target: look up a transport for it */
		ret = -EISCONN;
		trans = rxrpc_name_to_transport(sock, m->msg_name,
						m->msg_namelen, 0, GFP_KERNEL);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
	} else {
		/* otherwise use the connected socket's transport, if any */
		trans = rx->trans;
		if (trans)
			atomic_inc(&trans->usage);
	}

	switch (rx->sk.sk_state) {
	case RXRPC_SERVER_LISTENING:
		if (!m->msg_name) {
			ret = rxrpc_server_sendmsg(iocb, rx, m, len);
			break;
		}
		/* fall through - named send from a listener acts as client */
	case RXRPC_SERVER_BOUND:
	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name) {
			ret = -ENOTCONN;
			break;
		}
		/* fall through - have an explicit destination */
	case RXRPC_CLIENT_CONNECTED:
		ret = rxrpc_client_sendmsg(iocb, rx, trans, m, len);
		break;
	default:
		ret = -ENOTCONN;
		break;
	}

out:
	release_sock(&rx->sk);
	if (trans)
		rxrpc_put_transport(trans);
	_leave(" = %d", ret);
	return ret;
}
506 
/*
 * set RxRPC socket options
 * - all options may only be changed while the socket is unconnected;
 *   security key/keyring options also require no key to be set yet
 * - returns 0 on success or a negative errno
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned min_sec_level;
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			/* boolean flag: takes no option data */
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			set_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags);
			goto success;

		case RXRPC_SECURITY_KEY:
			/* set the client security key by description */
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			/* set the server security keyring by description */
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			/* set the minimum acceptable security level */
			ret = -EINVAL;
			if (optlen != sizeof(unsigned))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNCONNECTED)
				goto error;
			ret = get_user(min_sec_level,
				       (unsigned __user *) optval);
			if (ret < 0)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		default:
			break;
		}
	}

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}
582 
583 /*
584  * permit an RxRPC socket to be polled
585  */
586 static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
587 			       poll_table *wait)
588 {
589 	unsigned int mask;
590 	struct sock *sk = sock->sk;
591 
592 	sock_poll_wait(file, sk->sk_sleep, wait);
593 	mask = 0;
594 
595 	/* the socket is readable if there are any messages waiting on the Rx
596 	 * queue */
597 	if (!skb_queue_empty(&sk->sk_receive_queue))
598 		mask |= POLLIN | POLLRDNORM;
599 
600 	/* the socket is writable if there is space to add new data to the
601 	 * socket; there is no guarantee that any particular call in progress
602 	 * on the socket may have space in the Tx ACK window */
603 	if (rxrpc_writable(sk))
604 		mask |= POLLOUT | POLLWRNORM;
605 
606 	return mask;
607 }
608 
/*
 * create an RxRPC socket
 * - only SOCK_DGRAM over UDP/IPv4 (protocol PF_INET) in the initial network
 *   namespace is supported
 * - returns 0 on success or a negative errno
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	/* we support transport protocol UDP only */
	if (protocol != PF_INET)
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto);
	if (!sk)
		return -ENOMEM;

	/* install our callbacks and initial state on the new sock */
	sock_init_data(sock, sk);
	sk->sk_state		= RXRPC_UNCONNECTED;
	sk->sk_write_space	= rxrpc_write_space;
	sk->sk_max_ack_backlog	= sysctl_rxrpc_max_qlen;
	sk->sk_destruct		= rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->proto = protocol;
	rx->calls = RB_ROOT;

	INIT_LIST_HEAD(&rx->listen_link);
	INIT_LIST_HEAD(&rx->secureq);
	INIT_LIST_HEAD(&rx->acceptq);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	_leave(" = 0 [%p]", rx);
	return 0;
}
656 
/*
 * RxRPC socket destructor
 * - called when the last ref on the sock is released; purges any remaining
 *   received packets and sanity-checks that the sock is really dead
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_queue(&sk->sk_receive_queue);

	/* no write memory should remain committed and the sock should be
	 * unhashed and detached from its socket by now */
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}
675 
/*
 * release an RxRPC socket
 * - marks the socket closed, removes any service registration, flushes out
 *   calls and queued packets, then drops the refs it holds on its
 *   connection, bundle, transport and local endpoint
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* change state under the Rx queue lock so receivers see it
	 * consistently */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	ASSERTCMP(rx->listen_link.next, !=, LIST_POISON1);

	/* deregister from the local endpoint's service list if listening */
	if (!list_empty(&rx->listen_link)) {
		write_lock_bh(&rx->local->services_lock);
		list_del(&rx->listen_link);
		write_unlock_bh(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	if (rx->conn) {
		rxrpc_put_connection(rx->conn);
		rx->conn = NULL;
	}

	if (rx->bundle) {
		rxrpc_put_bundle(rx->trans, rx->bundle);
		rx->bundle = NULL;
	}
	if (rx->trans) {
		rxrpc_put_transport(rx->trans);
		rx->trans = NULL;
	}
	if (rx->local) {
		rxrpc_put_local(rx->local);
		rx->local = NULL;
	}

	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}
733 
734 /*
735  * release an RxRPC BSD socket on close() or equivalent
736  */
737 static int rxrpc_release(struct socket *sock)
738 {
739 	struct sock *sk = sock->sk;
740 
741 	_enter("%p{%p}", sock, sk);
742 
743 	if (!sk)
744 		return 0;
745 
746 	sock->sk = NULL;
747 
748 	return rxrpc_release_sock(sk);
749 }
750 
751 /*
752  * RxRPC network protocol
753  */
754 static const struct proto_ops rxrpc_rpc_ops = {
755 	.family		= PF_UNIX,
756 	.owner		= THIS_MODULE,
757 	.release	= rxrpc_release,
758 	.bind		= rxrpc_bind,
759 	.connect	= rxrpc_connect,
760 	.socketpair	= sock_no_socketpair,
761 	.accept		= sock_no_accept,
762 	.getname	= sock_no_getname,
763 	.poll		= rxrpc_poll,
764 	.ioctl		= sock_no_ioctl,
765 	.listen		= rxrpc_listen,
766 	.shutdown	= sock_no_shutdown,
767 	.setsockopt	= rxrpc_setsockopt,
768 	.getsockopt	= sock_no_getsockopt,
769 	.sendmsg	= rxrpc_sendmsg,
770 	.recvmsg	= rxrpc_recvmsg,
771 	.mmap		= sock_no_mmap,
772 	.sendpage	= sock_no_sendpage,
773 };
774 
/* protocol definition registered with proto_register(); obj_size makes
 * sk_alloc() allocate a full struct rxrpc_sock per socket */
static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_header),
};
781 
/* socket-family registration: routes socket(PF_RXRPC, ...) to rxrpc_create */
static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create = rxrpc_create,
	.owner	= THIS_MODULE,
};
787 
788 /*
789  * initialise and register the RxRPC protocol
790  */
791 static int __init af_rxrpc_init(void)
792 {
793 	struct sk_buff *dummy_skb;
794 	int ret = -1;
795 
796 	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));
797 
798 	rxrpc_epoch = htonl(get_seconds());
799 
800 	ret = -ENOMEM;
801 	rxrpc_call_jar = kmem_cache_create(
802 		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
803 		SLAB_HWCACHE_ALIGN, NULL);
804 	if (!rxrpc_call_jar) {
805 		printk(KERN_NOTICE "RxRPC: Failed to allocate call jar\n");
806 		goto error_call_jar;
807 	}
808 
809 	rxrpc_workqueue = create_workqueue("krxrpcd");
810 	if (!rxrpc_workqueue) {
811 		printk(KERN_NOTICE "RxRPC: Failed to allocate work queue\n");
812 		goto error_work_queue;
813 	}
814 
815 	ret = proto_register(&rxrpc_proto, 1);
816 	if (ret < 0) {
817 		printk(KERN_CRIT "RxRPC: Cannot register protocol\n");
818 		goto error_proto;
819 	}
820 
821 	ret = sock_register(&rxrpc_family_ops);
822 	if (ret < 0) {
823 		printk(KERN_CRIT "RxRPC: Cannot register socket family\n");
824 		goto error_sock;
825 	}
826 
827 	ret = register_key_type(&key_type_rxrpc);
828 	if (ret < 0) {
829 		printk(KERN_CRIT "RxRPC: Cannot register client key type\n");
830 		goto error_key_type;
831 	}
832 
833 	ret = register_key_type(&key_type_rxrpc_s);
834 	if (ret < 0) {
835 		printk(KERN_CRIT "RxRPC: Cannot register server key type\n");
836 		goto error_key_type_s;
837 	}
838 
839 #ifdef CONFIG_PROC_FS
840 	proc_net_fops_create(&init_net, "rxrpc_calls", 0, &rxrpc_call_seq_fops);
841 	proc_net_fops_create(&init_net, "rxrpc_conns", 0, &rxrpc_connection_seq_fops);
842 #endif
843 	return 0;
844 
845 error_key_type_s:
846 	unregister_key_type(&key_type_rxrpc);
847 error_key_type:
848 	sock_unregister(PF_RXRPC);
849 error_sock:
850 	proto_unregister(&rxrpc_proto);
851 error_proto:
852 	destroy_workqueue(rxrpc_workqueue);
853 error_work_queue:
854 	kmem_cache_destroy(rxrpc_call_jar);
855 error_call_jar:
856 	return ret;
857 }
858 
/*
 * unregister the RxRPC protocol
 * - deregisters everything set up by af_rxrpc_init() and destroys all
 *   remaining protocol objects, checking no skbs are leaked
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	/* tear down objects from the most to the least dependent */
	rxrpc_destroy_all_calls();
	rxrpc_destroy_all_connections();
	rxrpc_destroy_all_transports();
	rxrpc_destroy_all_peers();
	rxrpc_destroy_all_locals();

	/* every rxrpc-owned skb should have been released by now */
	ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0);

	_debug("flush scheduled work");
	flush_workqueue(rxrpc_workqueue);
	proc_net_remove(&init_net, "rxrpc_conns");
	proc_net_remove(&init_net, "rxrpc_calls");
	destroy_workqueue(rxrpc_workqueue);
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}
885 
886 module_init(af_rxrpc_init);
887 module_exit(af_rxrpc_exit);
888