xref: /linux/net/rxrpc/peer_event.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  And vice
	 * versa?
	 */
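	/* Note: serr->addr_offset gives the offset from the network header of
	 * the packet quoted in the ICMP error to the faulting destination
	 * address, so skb_network_header() + addr_offset should point at the
	 * address of the peer we were transmitting to.
	 */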
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
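			/* The quoted address is a v4-mapped IPv6 address
			 * (::ffff:a.b.c.d); the IPv4 address occupies the last
			 * four bytes of the in6_addr, hence the +12 offset.
			 */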
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%u)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}
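	/* For example, an if_mtu of 9000 is halved to 4500, whereas the
	 * common Ethernet if_mtu of 1500 is trimmed to 1400.
	 */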

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
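/* Presumably invoked via the transport socket's ->sk_error_report() hook;
 * each invocation drains at most one skb from the socket's error queue.
 */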
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	rxrpc_put_peer(peer);

	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
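/* Note that ee_errno carries a positive errno value, whereas a call records
 * its completion error as a negative errno; hence the negation below.
 */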
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
}

/*
 * Add RTT information to cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;

	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}
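	/* The cursor wraps with a mask, so RXRPC_RTT_CACHE_SIZE must be a
	 * power of two.  Once the buffer has filled, sum is a sliding-window
	 * total over the last RXRPC_RTT_CACHE_SIZE samples.
	 */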

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		avg = sum;
		do_div(avg, usage);
	}

	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}

/*
 * Perform keep-alive pings.
 */
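/* Each peer on the collector list is rebucketed according to when its next
 * keepalive falls due (last_tx_at + RXRPC_KEEPALIVE_TIME).  If that moment
 * has already passed (or somehow lies more than a full period ahead), a
 * keepalive is transmitted immediately and the peer is parked a full period
 * into the future.  Note that the hash lock is dropped around transmission.
 */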
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		spin_unlock_bh(&rxnet->peer_hash_lock);

		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
		slot = keepalive_at - base;
		_debug("%02x peer %u t=%d {%pISp}",
		       cursor, peer->debug_id, slot, &peer->srx.transport);

		if (keepalive_at <= base ||
		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
			rxrpc_send_keepalive(peer);
			slot = RXRPC_KEEPALIVE_TIME;
		}

		/* A transmission to this peer occurred since we last examined
		 * it, so put it into the appropriate future bucket.
		 */
		slot += cursor;
		slot &= mask;
		spin_lock_bh(&rxnet->peer_hash_lock);
		list_add_tail(&peer->keepalive_link,
			      &rxnet->peer_keepalive[slot & mask]);
		rxrpc_put_peer(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
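/* rxnet->peer_keepalive is a ring of one-second buckets.  base is the
 * wall-clock second corresponding to the bucket at the cursor; each pass
 * sweeps every bucket that has expired since the last pass, plus anything on
 * the peer_keepalive_new list, into a collector list, dispatches those peers
 * and then arms the timer for the next occupied slot.
 */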
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Move all the peers currently lodged in expired buckets, plus all
	 * new peers, onto a temporary list.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}
445