xref: /linux/net/rxrpc/conn_object.c (revision 3cf0a98fea776adb09087e521fe150c295a4b031)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* RxRPC virtual connection handler, common bits.
3  *
4  * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/net.h>
13 #include <linux/skbuff.h>
14 #include "ar-internal.h"
15 
16 /*
17  * Time till a connection expires after last use (in seconds).
18  */
19 unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
20 unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
21 
22 static void rxrpc_clean_up_connection(struct work_struct *work);
23 static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
24 					 unsigned long reap_at);
25 
26 void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
27 {
28 	struct rxrpc_local *local = conn->local;
29 	bool busy;
30 
31 	if (WARN_ON_ONCE(!local))
32 		return;
33 
34 	spin_lock_irq(&local->lock);
35 	busy = !list_empty(&conn->attend_link);
36 	if (!busy) {
37 		rxrpc_get_connection(conn, why);
38 		list_add_tail(&conn->attend_link, &local->conn_attend_q);
39 	}
40 	spin_unlock_irq(&local->lock);
41 	rxrpc_wake_up_io_thread(local);
42 }
43 
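/* A note on the pattern above: the list_empty() check under local->lock
 * makes the poke idempotent (a conn already on conn_attend_q is not queued
 * twice), so the queue entry owns exactly one ref, which the I/O thread is
 * expected to drop once it has attended to the connection.
 */
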
44 static void rxrpc_connection_timer(struct timer_list *timer)
45 {
46 	struct rxrpc_connection *conn =
47 		container_of(timer, struct rxrpc_connection, timer);
48 
49 	rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
50 }
51 
52 /*
53  * allocate a new connection
54  */
55 struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
56 						gfp_t gfp)
57 {
58 	struct rxrpc_connection *conn;
59 
60 	_enter("");
61 
62 	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
63 	if (conn) {
64 		INIT_LIST_HEAD(&conn->cache_link);
65 		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
66 		INIT_WORK(&conn->processor, rxrpc_process_connection);
67 		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
68 		INIT_LIST_HEAD(&conn->proc_link);
69 		INIT_LIST_HEAD(&conn->link);
70 		INIT_LIST_HEAD(&conn->attend_link);
71 		mutex_init(&conn->security_lock);
72 		mutex_init(&conn->tx_data_alloc_lock);
73 		skb_queue_head_init(&conn->rx_queue);
74 		conn->rxnet = rxnet;
75 		conn->security = &rxrpc_no_security;
76 		spin_lock_init(&conn->state_lock);
77 		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
78 		conn->idle_timestamp = jiffies;
79 	}
80 
81 	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
82 	return conn;
83 }
84 
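/* Illustrative caller sketch (not part of this file): the allocator above
 * leaves ->ref unset and does not account the conn in rxnet->nr_conns, so
 * a constructor is expected to do roughly:
 *
 *	conn = rxrpc_alloc_connection(rxnet, GFP_KERNEL);
 *	if (!conn)
 *		return NULL;
 *	refcount_set(&conn->ref, 1);
 *	atomic_inc(&rxnet->nr_conns);
 */
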
85 /*
86  * Look up a connection in the cache by protocol parameters.
87  *
88  * If successful, a pointer to the connection is returned, but no ref is taken.
89  * NULL is returned if there is no match.
90  *
91  * When searching for a service call, if we find a peer but no connection, we
92  * return that through *_peer in case we need to create a new service call.
93  *
94  * The caller must be holding the RCU read lock.
95  */
96 struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
97 							  struct sockaddr_rxrpc *srx,
98 							  struct sk_buff *skb)
99 {
100 	struct rxrpc_connection *conn;
101 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
102 	struct rxrpc_peer *peer;
103 
104 	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
105 
106 	/* Look up client connections by connection ID alone as their
107 	 * IDs are unique for this machine.
108 	 */
109 	conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
110 	if (!conn || refcount_read(&conn->ref) == 0) {
111 		_debug("no conn");
112 		goto not_found;
113 	}
114 
115 	if (conn->proto.epoch != sp->hdr.epoch ||
116 	    conn->local != local)
117 		goto not_found;
118 
119 	peer = conn->peer;
120 	switch (srx->transport.family) {
121 	case AF_INET:
122 		if (peer->srx.transport.sin.sin_port !=
123 		    srx->transport.sin.sin_port)
124 			goto not_found;
125 		break;
126 #ifdef CONFIG_AF_RXRPC_IPV6
127 	case AF_INET6:
128 		if (peer->srx.transport.sin6.sin6_port !=
129 		    srx->transport.sin6.sin6_port)
130 			goto not_found;
131 		break;
132 #endif
133 	default:
134 		BUG();
135 	}
136 
137 	_leave(" = %p", conn);
138 	return conn;
139 
140 not_found:
141 	_leave(" = NULL");
142 	return NULL;
143 }
144 
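/* Illustrative usage sketch: the lookup above takes no ref, so a caller
 * that keeps the pointer beyond the RCU read-side critical section must
 * convert it, e.g. (the "why" trace value is a placeholder):
 *
 *	rcu_read_lock();
 *	conn = rxrpc_find_client_connection_rcu(local, &srx, skb);
 *	if (conn)
 *		conn = rxrpc_get_connection_maybe(conn, why);
 *	rcu_read_unlock();
 */
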
145 /*
146  * Disconnect a call and clear any channel it occupies when that call
147  * terminates.  The caller must release the call's ref on the connection
148  * afterwards.
149  */
150 void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
151 			     struct rxrpc_call *call)
152 {
153 	struct rxrpc_channel *chan =
154 		&conn->channels[call->cid & RXRPC_CHANNELMASK];
155 
156 	_enter("%d,%x", conn->debug_id, call->cid);
157 
158 	if (chan->call == call) {
159 		/* Save the result of the call so that we can repeat it if necessary
160 		 * through the channel, whilst disposing of the actual call record.
161 		 */
162 		trace_rxrpc_disconnect_call(call);
163 		switch (call->completion) {
164 		case RXRPC_CALL_SUCCEEDED:
165 			chan->last_seq = call->rx_highest_seq;
166 			chan->last_type = RXRPC_PACKET_TYPE_ACK;
167 			break;
168 		case RXRPC_CALL_LOCALLY_ABORTED:
169 			chan->last_abort = call->abort_code;
170 			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
171 			break;
172 		default:
173 			chan->last_abort = RX_CALL_DEAD;
174 			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
175 			break;
176 		}
177 
178 		chan->last_call = chan->call_id;
179 		chan->call_id = chan->call_counter;
180 		chan->call = NULL;
181 	}
182 
183 	_leave("");
184 }
185 
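/* Note: the last_type/last_seq/last_abort values saved above allow the
 * connection-level processor to replay the call's final ACK or ABORT if
 * the peer retransmits packets for the completed call; see
 * rxrpc_conn_retransmit_call() in conn_event.c.
 */
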
186 /*
187  * Disconnect a call and clear any channel it occupies when that call
188  * terminates.
189  */
190 void rxrpc_disconnect_call(struct rxrpc_call *call)
191 {
192 	struct rxrpc_connection *conn = call->conn;
193 
194 	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
195 	rxrpc_see_call(call, rxrpc_call_see_disconnected);
196 
197 	call->peer->cong_ssthresh = call->cong_ssthresh;
198 
199 	if (!hlist_unhashed(&call->error_link)) {
200 		spin_lock_irq(&call->peer->lock);
201 		hlist_del_init(&call->error_link);
202 		spin_unlock_irq(&call->peer->lock);
203 	}
204 
205 	if (rxrpc_is_client_call(call)) {
206 		rxrpc_disconnect_client_call(call->bundle, call);
207 	} else {
208 		__rxrpc_disconnect_call(conn, call);
209 		conn->idle_timestamp = jiffies;
210 		if (atomic_dec_and_test(&conn->active))
211 			rxrpc_set_service_reap_timer(conn->rxnet,
212 						     jiffies + rxrpc_connection_expiry * HZ);
213 	}
214 
215 	rxrpc_put_call(call, rxrpc_call_put_io_thread);
216 }
217 
218 /*
219  * Queue a connection's work processor.  Note that no extra ref is taken
220  * here; the connection is only noted for tracing purposes.
221  */
222 void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
223 {
224 	if (atomic_read(&conn->active) >= 0 &&
225 	    rxrpc_queue_work(&conn->processor))
226 		rxrpc_see_connection(conn, why);
227 }
228 
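/* Note: rxrpc_queue_work() returns true only if the work item was not
 * already queued, so the trace above fires once per distinct queuing
 * rather than once per attempt.
 */
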
229 /*
230  * Note the re-emergence of a connection (tracing only; no ref is taken).
231  */
232 void rxrpc_see_connection(struct rxrpc_connection *conn,
233 			  enum rxrpc_conn_trace why)
234 {
235 	if (conn) {
236 		int r = refcount_read(&conn->ref);
237 
238 		trace_rxrpc_conn(conn->debug_id, r, why);
239 	}
240 }
241 
242 /*
243  * Get a ref on a connection.
244  */
245 struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
246 					      enum rxrpc_conn_trace why)
247 {
248 	int r;
249 
250 	__refcount_inc(&conn->ref, &r);
251 	trace_rxrpc_conn(conn->debug_id, r + 1, why);
252 	return conn;
253 }
254 
255 /*
256  * Try to get a ref on a connection.
257  */
258 struct rxrpc_connection *
259 rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
260 			   enum rxrpc_conn_trace why)
261 {
262 	int r;
263 
264 	if (conn) {
265 		if (__refcount_inc_not_zero(&conn->ref, &r))
266 			trace_rxrpc_conn(conn->debug_id, r + 1, why);
267 		else
268 			conn = NULL;
269 	}
270 	return conn;
271 }
272 
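/* Note: __refcount_inc_not_zero() fails once the count has reached zero,
 * which is what makes the RCU lookup above safe: a conn whose last ref has
 * been dropped may still be visible until its RCU free runs, but it can no
 * longer be revived here.
 */
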
273 /*
274  * Set the service connection reap timer.
275  */
276 static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
277 					 unsigned long reap_at)
278 {
279 	if (rxnet->live)
280 		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
281 }
282 
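/* Note: timer_reduce() only ever moves the timer's expiry earlier, so
 * concurrent callers computing different reap deadlines may all call this;
 * the soonest deadline wins.
 */
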
283 /*
284  * Free a virtual connection once the RCU grace period has elapsed.
285  */
286 static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
287 {
288 	struct rxrpc_connection *conn =
289 		container_of(rcu, struct rxrpc_connection, rcu);
290 	struct rxrpc_net *rxnet = conn->rxnet;
291 
292 	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));
293 
294 	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
295 			 rxrpc_conn_free);
296 	kfree(conn);
297 
298 	if (atomic_dec_and_test(&rxnet->nr_conns))
299 		wake_up_var(&rxnet->nr_conns);
300 }
301 
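/* Note: the nr_conns decrement above pairs with the wait_var_event() in
 * rxrpc_destroy_all_connections(); the wake_up_var() lets a dying network
 * namespace proceed once the last connection has been freed.
 */
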
302 /*
303  * Clean up a dead connection.
304  */
305 static void rxrpc_clean_up_connection(struct work_struct *work)
306 {
307 	struct rxrpc_connection *conn =
308 		container_of(work, struct rxrpc_connection, destructor);
309 	struct rxrpc_net *rxnet = conn->rxnet;
310 
311 	ASSERT(!conn->channels[0].call &&
312 	       !conn->channels[1].call &&
313 	       !conn->channels[2].call &&
314 	       !conn->channels[3].call);
315 	ASSERT(list_empty(&conn->cache_link));
316 
317 	del_timer_sync(&conn->timer);
318 	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
319 	del_timer_sync(&conn->timer);
320 
321 	write_lock(&rxnet->conn_lock);
322 	list_del_init(&conn->proc_link);
323 	write_unlock(&rxnet->conn_lock);
324 
325 	if (conn->pmtud_probe) {
326 		trace_rxrpc_pmtud_lost(conn, 0);
327 		conn->peer->pmtud_probing = false;
328 		conn->peer->pmtud_pending = true;
329 	}
330 
331 	rxrpc_purge_queue(&conn->rx_queue);
332 
333 	rxrpc_kill_client_conn(conn);
334 
335 	conn->security->clear(conn);
336 	key_put(conn->key);
337 	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
338 	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
339 	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);
340 
341 	/* Drain the Rx queue again.  Even though the connection has been
342 	 * unpublished, an incoming packet could still have been added to the
343 	 * queue after the first purge above, so purge it a second time.
344 	 */
345 	rxrpc_purge_queue(&conn->rx_queue);
346 
347 	page_frag_cache_drain(&conn->tx_data_alloc);
348 	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
349 }
350 
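/* Note: in the function above, the timer is deleted, the processor work
 * flushed, then the timer deleted again, because the processor can re-arm
 * the timer while it runs; the second del_timer_sync() closes that window.
 */
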
351 /*
352  * Drop a ref on a connection.
353  */
354 void rxrpc_put_connection(struct rxrpc_connection *conn,
355 			  enum rxrpc_conn_trace why)
356 {
357 	unsigned int debug_id;
358 	bool dead;
359 	int r;
360 
361 	if (!conn)
362 		return;
363 
364 	debug_id = conn->debug_id;
365 	dead = __refcount_dec_and_test(&conn->ref, &r);
366 	trace_rxrpc_conn(debug_id, r - 1, why);
367 	if (dead) {
368 		del_timer(&conn->timer);
369 		cancel_work(&conn->processor);
370 
371 		if (in_softirq() || work_busy(&conn->processor) ||
372 		    timer_pending(&conn->timer))
373 			/* Can't use the rxrpc workqueue as we need to cancel/flush
374 			 * something that may be running/waiting there.
375 			 */
376 			schedule_work(&conn->destructor);
377 		else
378 			rxrpc_clean_up_connection(&conn->destructor);
379 	}
380 }
381 
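/* Note: cleanup above is punted to the system workqueue rather than the
 * rxrpc workqueue when the final put happens in softirq context or while
 * the processor or timer may still be running, since
 * rxrpc_clean_up_connection() sleeps in cancel_work_sync() and
 * del_timer_sync() and must not be run from a context it needs to flush.
 */
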
382 /*
383  * reap dead service connections
384  */
385 void rxrpc_service_connection_reaper(struct work_struct *work)
386 {
387 	struct rxrpc_connection *conn, *_p;
388 	struct rxrpc_net *rxnet =
389 		container_of(work, struct rxrpc_net, service_conn_reaper);
390 	unsigned long expire_at, earliest, idle_timestamp, now;
391 	int active;
392 
393 	LIST_HEAD(graveyard);
394 
395 	_enter("");
396 
397 	now = jiffies;
398 	earliest = now + MAX_JIFFY_OFFSET;
399 
400 	write_lock(&rxnet->conn_lock);
401 	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
402 		ASSERTCMP(atomic_read(&conn->active), >=, 0);
403 		if (likely(atomic_read(&conn->active) > 0))
404 			continue;
405 		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
406 			continue;
407 
408 		if (rxnet->live && !conn->local->dead) {
409 			idle_timestamp = READ_ONCE(conn->idle_timestamp);
410 			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
411 			if (conn->local->service_closed)
412 				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
413 
414 			_debug("reap CONN %d { a=%d,t=%ld }",
415 			       conn->debug_id, atomic_read(&conn->active),
416 			       (long)expire_at - (long)now);
417 
418 			if (time_before(now, expire_at)) {
419 				if (time_before(expire_at, earliest))
420 					earliest = expire_at;
421 				continue;
422 			}
423 		}
424 
425 		/* The activity count sits at 0 whilst the conn is unused on
426 		 * the list; we reduce that to -1 to make the conn unavailable.
427 		 */
428 		active = 0;
429 		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
430 			continue;
431 		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);
432 
433 		if (rxrpc_conn_is_client(conn))
434 			BUG();
435 		else
436 			rxrpc_unpublish_service_conn(conn);
437 
438 		list_move_tail(&conn->link, &graveyard);
439 	}
440 	write_unlock(&rxnet->conn_lock);
441 
442 	if (earliest != now + MAX_JIFFY_OFFSET) {
443 		_debug("reschedule reaper %ld", (long)earliest - (long)now);
444 		ASSERT(time_after(earliest, now));
445 		rxrpc_set_service_reap_timer(rxnet, earliest);
446 	}
447 
448 	while (!list_empty(&graveyard)) {
449 		conn = list_entry(graveyard.next, struct rxrpc_connection,
450 				  link);
451 		list_del_init(&conn->link);
452 
453 		ASSERTCMP(atomic_read(&conn->active), ==, -1);
454 		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
455 	}
456 
457 	_leave("");
458 }
459 
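/* Note: the atomic_try_cmpxchg() of conn->active from 0 to -1 above is the
 * reaper's claim on a connection: only a conn with no active users can be
 * moved to the graveyard, and the -1 tombstone stops rxrpc_queue_conn()
 * and new users from resurrecting it.
 */
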
460 /*
461  * preemptively destroy all the service connection records rather than
462  * waiting for them to time out
463  */
464 void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
465 {
466 	struct rxrpc_connection *conn, *_p;
467 	bool leak = false;
468 
469 	_enter("");
470 
471 	atomic_dec(&rxnet->nr_conns);
472 
473 	del_timer_sync(&rxnet->service_conn_reap_timer);
474 	rxrpc_queue_work(&rxnet->service_conn_reaper);
475 	flush_workqueue(rxrpc_workqueue);
476 
477 	write_lock(&rxnet->conn_lock);
478 	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
479 		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
480 		       conn, refcount_read(&conn->ref));
481 		leak = true;
482 	}
483 	write_unlock(&rxnet->conn_lock);
484 	BUG_ON(leak);
485 
486 	ASSERT(list_empty(&rxnet->conn_proc_list));
487 
488 	/* We need to wait for the connections to be destroyed by RCU as they
489 	 * pin things that we still need to get rid of.
490 	 */
491 	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
492 	_leave("");
493 }
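
/* Note (assumption): the bare atomic_dec(&rxnet->nr_conns) at the top of
 * the function above appears to drop a +1 bias placed on the counter at
 * namespace-init time, allowing the wait_var_event() to complete once
 * every connection's RCU free has run.
 */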
494