/* RxRPC remote transport endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include "ar-internal.h"

/* All peer records are kept on a single global list, guarded by
 * rxrpc_peer_lock; the wait queue lets rxrpc_destroy_all_peers() wait for
 * the list to drain on module removal.
 */
static LIST_HEAD(rxrpc_peers);
static DEFINE_RWLOCK(rxrpc_peer_lock);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);

static void rxrpc_destroy_peer(struct work_struct *work);

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
	struct rtable *rt;
	struct flowi4 fl4;

	peer->if_mtu = 1500;

	rt = ip_route_output_ports(&init_net, &fl4, NULL,
				   peer->srx.transport.sin.sin_addr.s_addr, 0,
				   htons(7000), htons(7001),
				   IPPROTO_UDP, 0, 0);
	if (IS_ERR(rt)) {
		_leave(" [route err %ld]", PTR_ERR(rt));
		return;
	}

	peer->if_mtu = dst_mtu(&rt->dst);
	dst_release(&rt->dst);

	_leave(" [if_mtu %u]", peer->if_mtu);
}
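
/*
 * Explanatory note: the route lookup above is a throwaway probe.  The port
 * numbers passed to ip_route_output_ports() - 7000 and 7001, which happen
 * to be the traditional AFS/Rx ports - are essentially arbitrary here, as
 * for UDP the route selected should normally depend only on the addresses.
 * All that is wanted is the dst entry for the path to the peer, from which
 * dst_mtu() yields the usable packet size; 1500 (typical Ethernet) remains
 * the fallback if no route can be found.
 */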

/*
 * allocate a new peer
 */
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
					   gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
		INIT_LIST_HEAD(&peer->link);
		INIT_LIST_HEAD(&peer->error_targets);
		spin_lock_init(&peer->lock);
		atomic_set(&peer->usage, 1);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&peer->srx, srx, sizeof(*srx));

		rxrpc_assess_MTU_size(peer);
		peer->mtu = peer->if_mtu;

		if (srx->transport.family == AF_INET) {
			peer->hdrsize = sizeof(struct iphdr);
			switch (srx->transport_type) {
			case SOCK_DGRAM:
				peer->hdrsize += sizeof(struct udphdr);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}

		peer->hdrsize += sizeof(struct rxrpc_wire_header);
		peer->maxdata = peer->mtu - peer->hdrsize;
	}

	_leave(" = %p", peer);
	return peer;
}
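
/*
 * Worked example for the only case supported above (IPv4 + UDP), assuming
 * the common 1500-byte Ethernet MTU and a 28-byte RxRPC wire header (the
 * figure is illustrative; the code uses sizeof() and so tracks the real
 * struct rxrpc_wire_header):
 *
 *	hdrsize = 20 (iphdr) + 8 (udphdr) + 28 (wire header) = 56
 *	maxdata = mtu - hdrsize = 1500 - 56 = 1444 bytes of payload
 */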

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%d,%d,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport_len,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	/* search the peer list first */
	read_lock_bh(&rxrpc_peer_lock);
	list_for_each_entry(peer, &rxrpc_peers, link) {
		_debug("check PEER %d { u=%d t=%d l=%d }",
		       peer->debug_id,
		       atomic_read(&peer->usage),
		       peer->srx.transport_type,
		       peer->srx.transport_len);

		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_peer;
	}
	read_unlock_bh(&rxrpc_peer_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_peer(srx, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_peer_lock);

	list_for_each_entry(peer, &rxrpc_peers, link) {
		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	peer = candidate;
	candidate = NULL;
	usage = atomic_read(&peer->usage);

	list_add_tail(&peer->link, &rxrpc_peers);
	write_unlock_bh(&rxrpc_peer_lock);
	new = "new";

success:
	_net("PEER %s %d {%d,%u,%pI4+%hu}",
	     new,
	     peer->debug_id,
	     peer->srx.transport_type,
	     peer->srx.transport.family,
	     &peer->srx.transport.sin.sin_addr,
	     ntohs(peer->srx.transport.sin.sin_port));

	_leave(" = %p {u=%d}", peer, usage);
	return peer;

	/* we found the peer in the list immediately */
found_extant_peer:
	usage = atomic_inc_return(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	goto success;

	/* we found the peer on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&peer->usage);
	write_unlock_bh(&rxrpc_peer_lock);
	kfree(candidate);
	goto success;
}
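
/*
 * Hypothetical caller, for illustration only: a pointer successfully
 * returned by rxrpc_get_peer() carries a reference that must eventually be
 * balanced by a call to rxrpc_put_peer():
 *
 *	struct rxrpc_peer *peer;
 *
 *	peer = rxrpc_get_peer(&srx, GFP_KERNEL);
 *	if (IS_ERR(peer))
 *		return PTR_ERR(peer);
 *	... use peer ...
 *	rxrpc_put_peer(peer);
 */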

/*
 * find the peer associated with a packet
 */
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
				   __be32 addr, __be16 port)
{
	struct rxrpc_peer *peer;

	_enter("");

	/* search the peer list */
	read_lock_bh(&rxrpc_peer_lock);

	if (local->srx.transport.family == AF_INET &&
	    local->srx.transport_type == SOCK_DGRAM) {
		list_for_each_entry(peer, &rxrpc_peers, link) {
			if (atomic_read(&peer->usage) > 0 &&
			    peer->srx.transport_type == SOCK_DGRAM &&
			    peer->srx.transport.family == AF_INET &&
			    peer->srx.transport.sin.sin_port == port &&
			    peer->srx.transport.sin.sin_addr.s_addr == addr)
				goto found_UDP_peer;
		}

		goto new_UDP_peer;
	}

	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EAFNOSUPPORT");
	return ERR_PTR(-EAFNOSUPPORT);

found_UDP_peer:
	_net("Rx UDP DGRAM from peer %d", peer->debug_id);
	atomic_inc(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = %p", peer);
	return peer;

new_UDP_peer:
	_net("Rx UDP DGRAM from NEW peer");
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EBUSY [new]");
	return ERR_PTR(-EBUSY);
}
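
/*
 * Hypothetical use on the receive path, for illustration only, with the
 * address and port taken from the IP and UDP headers (both already in
 * network byte order).  Unlike rxrpc_get_peer(), this never creates a
 * record: an unknown sender yields -EBUSY, which the caller must handle:
 *
 *	peer = rxrpc_find_peer(local, ip_hdr(skb)->saddr,
 *			       udp_hdr(skb)->source);
 *	if (IS_ERR(peer))
 *		goto cant_find_peer;
 *	...
 *	rxrpc_put_peer(peer);
 */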

/*
 * release a remote transport endpoint
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	_enter("%p{u=%d}", peer, atomic_read(&peer->usage));

	ASSERTCMP(atomic_read(&peer->usage), >, 0);

	if (likely(!atomic_dec_and_test(&peer->usage))) {
		_leave(" [in use]");
		return;
	}

	rxrpc_queue_work(&peer->destroyer);
	_leave("");
}
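
/*
 * Explanatory note: the final put above does not free the peer directly;
 * it hands the record to the destroyer work item set up in
 * rxrpc_alloc_peer(), so the unlinking from rxrpc_peers and the wakeup that
 * rxrpc_destroy_all_peers() waits for both happen from workqueue context.
 */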

/*
 * destroy a remote transport endpoint
 */
static void rxrpc_destroy_peer(struct work_struct *work)
{
	struct rxrpc_peer *peer =
		container_of(work, struct rxrpc_peer, destroyer);

	_enter("%p{%d}", peer, atomic_read(&peer->usage));

	write_lock_bh(&rxrpc_peer_lock);
	list_del(&peer->link);
	write_unlock_bh(&rxrpc_peer_lock);

	_net("DESTROY PEER %d", peer->debug_id);
	kfree(peer);

	if (list_empty(&rxrpc_peers))
		wake_up_all(&rxrpc_peer_wq);
	_leave("");
}

/*
 * clean up the peer records on module removal: the records are reference
 * counted, so all we can do here is wait for the outstanding references to
 * be released and the records destroyed
 */
void __exit rxrpc_destroy_all_peers(void)
{
	DECLARE_WAITQUEUE(myself, current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_peers)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_peer_wq, &myself);

		while (!list_empty(&rxrpc_peers)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_peer_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}