xref: /linux/net/rxrpc/conn_client.c (revision 74ce1896c6c65b2f8cccbf59162d542988835835)
/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *	The connection is on the rxnet->waiting_client_conns list which is kept
 *	in to-be-granted order.  Culled conns with waiters go to the back of
 *	the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *	The connection is on the rxnet->active_client_conns list which is kept
 *	in activation order for culling purposes.
 *
 *	rxnet->nr_active_client_conns is also held incremented for it.
 *
 *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
 *      being used to probe for service upgrade.
 *
 *  (5) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (6) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *	The connection is on the rxnet->idle_client_conns list which is kept in
 *	order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call as soon as we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
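
/* An informal summary (editorial, not from the original author) of the main
 * transitions between the cache states above; the authoritative logic lives
 * in rxrpc_animate_client_conn(), rxrpc_cull_active_client_conns(),
 * rxrpc_disconnect_client_call() and rxrpc_discard_expired_client_conns():
 *
 *	INACTIVE/CULLED/IDLE -> ACTIVE/UPGRADE	new call, below capacity
 *	INACTIVE/CULLED/IDLE -> WAITING		new call, at capacity
 *	WAITING -> ACTIVE/UPGRADE		capacity freed by a put
 *	ACTIVE/UPGRADE -> CULLED or WAITING	summarily culled for space
 *	any of the above -> IDLE or INACTIVE	last call disconnects (IDLE
 *						if EXPOSED, else INACTIVE)
 *	IDLE -> INACTIVE			idle expiry via the reaper
 */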

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
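
/* Editorial note: the channel number occupies the bottom RXRPC_CIDSHIFT bits
 * of a CID (see the RXRPC_CHANNELMASK extractions later in this file), which
 * is why the IDR index is shifted up when composing conn->proto.cid above and
 * shifted back down when releasing it.  The upper bits are thus the
 * per-machine connection ID proper, with (presumably) four calls multiplexed
 * per connection through conn->channels[].
 */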

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);

	conn->params		= *cp;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
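
/* Worked example (editorial, using the defaults defined above): with
 * rxrpc_max_client_connections = 1000, limit = max(4000, 1024U) = 4000, so a
 * conn whose ID lies more than 4000 slots from the IDR allocation cursor is
 * marked DONT_REUSE rather than being left to keep the IDR tree sparse and
 * memory-hungry.
 */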

/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level) ?:
				cmp(upgrade));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}
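
/* Editorial note on the cmp() macro used in both searches above: it keys the
 * rb-tree on the raw values of peer, key, security_level and upgrade, cast to
 * long and subtracted; only the sign of the result is used, which yields a
 * stable (if arbitrary) total ordering without a field-by-field comparator.
 */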

/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
	} else {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	}
	rxnet->nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}

/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_UPGRADE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}

/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	write_lock_bh(&call->state_lock);
	if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
		call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	else
		call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id	= call_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	case RXRPC_CONN_CLIENT_UPGRADE:
		mask = 0x01;
		break;
	default:
		return;
	}

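	/* Editorial note: avail is computed in the loop condition itself -
	 * the complement of active_chans, restricted to the mask picked
	 * above, is the set of grantable channels, and __ffs() selects the
	 * lowest-numbered one for each waiting call in turn.
	 */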
	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (call->call_id)
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
	rxrpc_cull_active_client_conns(rxnet);

	ret = rxrpc_get_client_conn(call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0) {
		rxrpc_disconnect_client_call(call);
		goto out;
	}

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

out:
	_leave(" = %d", ret);
	return ret;
}
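
/* Editorial summary of the path above: on success the call has call->conn set
 * and either holds a channel (call->call_id assigned) or was woken after
 * waiting for one.  If no channel can be had immediately and the gfp flags
 * forbid sleeping, rxrpc_wait_for_channel() returns -EAGAIN and the call is
 * disconnected again before the error is propagated.
 */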

/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}
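
/* Editorial note: exposure is what commits the callNumber.  A channel grant
 * in rxrpc_activate_one_channel() only provisionally assigns call_counter + 1;
 * the increment above makes it permanent, and conn exposure then pins the
 * connection with an extra ref so terminal retransmission can outlive the
 * call (see the EXPOSED flag description in the header comment).
 */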

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&call->socket->sk));

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	call->conn = NULL;

	spin_lock(&conn->channel_lock);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	ASSERTCMP(rcu_access_pointer(chan->call), ==, call);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Deal with termination of a service upgrade probe. */
		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
			rxrpc_activate_channels_locked(conn);
		}
		/* fall through */
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			queue_delayed_work(rxrpc_workqueue,
					   &rxnet->client_conn_reaper,
					   rxrpc_conn_idle_client_expiry);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}

/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}

/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}
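
/* Editorial note: rxrpc_put_one_client_conn() may return a follow-on
 * connection whose temporary ref also needs dropping, so the loop above
 * iterates rather than recursing, keeping the put path at fixed stack depth.
 */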

/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}
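
/* Editorial note: the cull walks rxnet->active_client_conns from the front,
 * i.e. oldest activation first.  Conns that still have waiters are demoted to
 * WAITING (requeued at the back, per state (2) in the header comment) rather
 * than being culled outright.
 */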

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(to_delayed_work(work),
			     struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;
	bool did_discard = false;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxnet->nr_client_conns;

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that the flag held on the usage count.  We deal with that
	 * here.  If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	did_discard = true;
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		queue_delayed_work(rxrpc_workqueue,
				   &rxnet->client_conn_reaper,
				   conn_expires_at - now);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}
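
/* Editorial notes: the trylock on client_conn_discard_lock means a caller
 * that loses the race simply skips the scan; the winner reschedules the work
 * item from not_yet_expired, and a newly idled conn requeues the reaper from
 * rxrpc_disconnect_client_call(), so nothing is lost by bailing out.  Also,
 * did_discard is set but never consulted in this revision.
 */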

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out.
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	cancel_delayed_work(&rxnet->client_conn_reaper);

	if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
		_debug("destroy: queue failed");

	_leave("");
}
1128