xref: /linux/net/rxrpc/peer_event.c (revision 38c6104e0bc7c8af20ab4897cb0504e3339e4fe4)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sk_buff *);
static void rxrpc_distribute_error(struct rxrpc_peer *, struct sk_buff *,
				   enum rxrpc_call_completion, int);

/*
 * Find the peer associated with a local error.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
						      const struct sk_buff *skb,
						      struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  And vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
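		/* An ICMPv6-origin report on an IPv4 socket presumably gives
		 * the offending address in IPv4-mapped form (::ffff:a.b.c.d),
		 * so the IPv4 part is the last four bytes of the in6_addr,
		 * twelve bytes in.
		 */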
		case SO_EE_ORIGIN_ICMP6:
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
{
	unsigned int max_data;

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu)
		peer->if_mtu = mtu;

	if (mtu == 0) {
		/* they didn't give us a size, so estimate one */
		mtu = peer->if_mtu;
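		/* Halve large MTUs, but not to below the Ethernet-ish 1500;
		 * knock 100 off small ones, keeping at least enough room for
		 * the rxrpc header.
		 */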
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

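	/* Work out the maximum usable payload, clamped to at least 500 bytes,
	 * and pull the path-MTU probe bounds down to match it.
	 */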
	max_data = max_t(int, mtu - peer->hdrsize, 500);
	if (max_data < peer->max_data) {
		if (peer->pmtud_good > max_data)
			peer->pmtud_good = max_data;
		if (peer->pmtud_bad > max_data + 1)
			peer->pmtud_bad = max_data + 1;

		trace_rxrpc_pmtud_reduce(peer, 0, max_data, rxrpc_pmtud_reduce_icmp);
		write_seqcount_begin(&peer->mtu_lock);
		peer->max_data = max_data;
		write_seqcount_end(&peer->mtu_lock);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_input_error(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer = NULL;

	_enter("L=%x", local->debug_id);

	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		return;
	}

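	/* Look up the peer under RCU and try to pin it with a ref so that it
	 * can't be destroyed whilst we process the error.
	 */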
	rcu_read_lock();
	peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input_error))
		peer = NULL;
	rcu_read_unlock();
	if (!peer)
		return;

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

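	/* ICMP fragmentation-needed and ICMPv6 packet-too-big reports carry
	 * the reported MTU in ee_info; adjust our packet size rather than
	 * treating these as fatal errors.
	 */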
	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr->ee.ee_info);
		goto out;
	}

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 &&
	    serr->ee.ee_type == ICMPV6_PKT_TOOBIG &&
	    serr->ee.ee_code == 0) {
		rxrpc_adjust_mtu(peer, serr->ee.ee_info);
		goto out;
	}

	rxrpc_store_error(peer, skb);
out:
	rxrpc_put_peer(peer, rxrpc_peer_put_input_error);
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer, struct sk_buff *skb)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct sock_extended_err *ee = &serr->ee;
	int err = ee->ee_errno;

	_enter("");

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

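	/* An ICMPv6 administrative-prohibition report shows up as EACCES;
	 * present it as host-unreachable instead.
	 */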
	case SO_EE_ORIGIN_ICMP6:
		if (err == EACCES)
			err = EHOSTUNREACH;
		fallthrough;
	case SO_EE_ORIGIN_ICMP:
	default:
		break;
	}

	rxrpc_distribute_error(peer, skb, compl, err);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, struct sk_buff *skb,
				   enum rxrpc_call_completion compl, int err)
{
	struct rxrpc_call *call;
	HLIST_HEAD(error_targets);

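	/* Move the entire list of affected calls across under the lock, then
	 * complete each call with the lock dropped, as completion may need to
	 * take other locks.
	 */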
	spin_lock_irq(&peer->lock);
	hlist_move_list(&peer->error_targets, &error_targets);

	while (!hlist_empty(&error_targets)) {
		call = hlist_entry(error_targets.first,
				   struct rxrpc_call, error_link);
		hlist_del_init(&call->error_link);
		spin_unlock_irq(&peer->lock);

		rxrpc_see_call(call, rxrpc_call_see_distribute_error);
		rxrpc_set_call_completion(call, compl, 0, -err);
		rxrpc_input_call_event(call);

		spin_lock_irq(&peer->lock);
	}

	spin_unlock_irq(&peer->lock);
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	bool use;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer, rxrpc_peer_get_keepalive))
			continue;

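		/* Only ping the peer if its local endpoint is still in active
		 * use; otherwise just drop the ref we took.
		 */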
		use = __rxrpc_use_local(peer->local, rxrpc_local_use_peer_keepalive);
		spin_unlock_bh(&rxnet->peer_hash_lock);

		if (use) {
			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

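			/* Ping now if the keepalive is due or the deadline
			 * looks implausible; the next one then goes a full
			 * period hence.
			 */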
			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer has occurred since we
			 * last examined it, so put it into the appropriate
			 * future bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock_bh(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			spin_unlock_bh(&rxnet->peer_hash_lock);
			rxrpc_unuse_local(peer->local, rxrpc_local_unuse_peer_keepalive);
		}
		rxrpc_put_peer(peer, rxrpc_peer_put_keepalive);
		spin_lock_bh(&rxnet->peer_hash_lock);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Move all the peers currently lodged in expired buckets, plus all
	 * new peers, onto a temporary list.
	 *
	 * Everything in the bucket at the cursor is processed this second;
	 * the bucket at cursor + 1 goes at now + 1s and so on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

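	/* Sweep up every bucket that has expired since the last run, to a
	 * maximum of one full revolution of the wheel.
	 */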
	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

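	/* base was stepped past the empty slots above, so the gap from now
	 * gives the delay to the next occupied slot.  timer_reduce() only
	 * ever brings the expiry time forward, so a sooner pending expiry is
	 * left alone.
	 */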
	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}

/*
 * Do path MTU probing.
 */
void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t acked_serial,
				 bool sendmsg_fail)
{
	struct rxrpc_peer *peer = conn->peer;
	unsigned int max_data = peer->max_data;
	int good, trial, bad, jumbo;

	good  = peer->pmtud_good;
	trial = peer->pmtud_trial;
	bad   = peer->pmtud_bad;
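	/* If the good and bad sizes have converged, there's nothing between
	 * them left to probe and the search is complete.
	 */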
	if (good >= bad - 1) {
		conn->pmtud_probe = 0;
		peer->pmtud_lost = false;
		return;
	}

	if (!peer->pmtud_probing)
		goto send_probe;

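	/* The probe is deemed lost if it couldn't be transmitted or if a
	 * packet sent after it has since been acked.
	 */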
	if (sendmsg_fail || after(acked_serial, conn->pmtud_probe)) {
		/* Retry a lost probe. */
		if (!peer->pmtud_lost) {
			trace_rxrpc_pmtud_lost(conn, acked_serial);
			conn->pmtud_probe = 0;
			peer->pmtud_lost = true;
			goto send_probe;
		}

		/* The probed size didn't seem to get through. */
		bad = trial;
		peer->pmtud_bad = bad;
		if (bad <= max_data)
			max_data = bad - 1;
	} else {
		/* It did get through. */
		good = trial;
		peer->pmtud_good = good;
		if (good > max_data)
			max_data = good;
	}

	max_data = umin(max_data, peer->ackr_max_data);
	if (max_data != peer->max_data) {
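		/* max_data is read locklessly against the seqcount, so the
		 * update must appear atomic to readers.
		 */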
		preempt_disable();
		write_seqcount_begin(&peer->mtu_lock);
		peer->max_data = max_data;
		write_seqcount_end(&peer->mtu_lock);
		preempt_enable();
	}

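	/* Work out how many jumbo subpackets the usable space now covers. */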
	jumbo = max_data + sizeof(struct rxrpc_jumbo_header);
	jumbo /= RXRPC_JUMBO_SUBPKTLEN;
	peer->pmtud_jumbo = jumbo;

	trace_rxrpc_pmtud_rx(conn, acked_serial);
	conn->pmtud_probe = 0;
	peer->pmtud_lost = false;

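	/* Pick the next size to probe for: prefer sizes that exactly fill a
	 * whole number of jumbo subpackets, in rough order of likely benefit,
	 * and otherwise binary-chop between the good and bad sizes.
	 */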
	if (good < RXRPC_JUMBO(2) && bad > RXRPC_JUMBO(2))
		trial = RXRPC_JUMBO(2);
	else if (good < RXRPC_JUMBO(4) && bad > RXRPC_JUMBO(4))
		trial = RXRPC_JUMBO(4);
	else if (good < RXRPC_JUMBO(3) && bad > RXRPC_JUMBO(3))
		trial = RXRPC_JUMBO(3);
	else if (good < RXRPC_JUMBO(6) && bad > RXRPC_JUMBO(6))
		trial = RXRPC_JUMBO(6);
	else if (good < RXRPC_JUMBO(5) && bad > RXRPC_JUMBO(5))
		trial = RXRPC_JUMBO(5);
	else if (good < RXRPC_JUMBO(8) && bad > RXRPC_JUMBO(8))
		trial = RXRPC_JUMBO(8);
	else if (good < RXRPC_JUMBO(7) && bad > RXRPC_JUMBO(7))
		trial = RXRPC_JUMBO(7);
	else
		trial = (good + bad) / 2;
	peer->pmtud_trial = trial;

	if (good >= bad)
		return;

send_probe:
	peer->pmtud_pending = true;
}
451