// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;

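/* Limits on the number of concurrent calls: one semaphore for calls made
 * through userspace sockets and one for calls begun by in-kernel services,
 * each permitting up to 1000 calls at once.
 */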
static struct semaphore rxrpc_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);

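/* Handle expiry of the call timer.  If the call hasn't yet completed, queue
 * its work processor, handing it the timer's ref on the call; otherwise just
 * drop that ref.
 */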
static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer_expired(call, jiffies);
		__rxrpc_queue_call(call);
	} else {
		rxrpc_put_call(call, rxrpc_call_put);
	}
}

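/* Bring the call timer's expiry time forward, taking a ref for the timer to
 * hold.  If timer_reduce() reports that the timer was already pending, the
 * timer already owns a ref, so the one just taken is dropped again.
 */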
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why)
{
	if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) {
		trace_rxrpc_timer(call, why, now);
		if (timer_reduce(&call->timer, expire_at))
			rxrpc_put_call(call, rxrpc_call_put_notimer);
	}
}

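/* Cancel the call timer, waiting for any running expiry handler to finish,
 * and drop the timer's ref on the call if the timer was still pending.
 */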
void rxrpc_delete_call_timer(struct rxrpc_call *call)
{
	if (del_timer_sync(&call->timer))
		rxrpc_put_call(call, rxrpc_call_put_timer);
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant call with the given user ID and get a ref on it
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, refcount_read(&call->ref));
	return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	INIT_LIST_HEAD(&call->tx_buffer);
	skb_queue_head_init(&call->recvmsg_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->tx_lock);
	spin_lock_init(&call->input_lock);
	spin_lock_init(&call->acks_ack_lock);
	rwlock_init(&call->state_lock);
	refcount_set(&call->ref, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;
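	/* The ACK window is two 32-bit sequence numbers packed into one
	 * 64-bit word; both halves start at 1 (presumably the floor and the
	 * top of an initially empty window).
	 */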
	atomic64_set(&call->ackr_window, 0x100000001ULL);

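	/* Poison the rb-tree node so that use before insertion shows up. */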
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;

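	/* Pick the initial congestion window by segment size in the style of
	 * RFC 3390's initial window, which caps it at roughly 4380 bytes:
	 * 2 segments above 2190 bytes, 3 above 1095, otherwise 4.
	 */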
	if (RXRPC_TX_SMSS > 2190)
		call->cong_cwnd = 2;
	else if (RXRPC_TX_SMSS > 1095)
		call->cong_cwnd = 3;
	else
		call->cong_cwnd = 4;
	call->cong_ssthresh = RXRPC_TX_MAX_WINDOW;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

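	/* Every timed event starts out disabled (as far in the future as
	 * jiffies can express); the timer is seeded but not armed here.
	 */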
	call->delay_ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
	call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.  Returns the limiter semaphore
 * that was taken, or NULL if the wait was interrupted.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter) {
		release_sock(&rx->sk);
		return ERR_PTR(-ERESTARTSYS);
	}

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
			 refcount_read(&call->ref),
			 here, (const void *)p->user_call_ID);
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	spin_lock_bh(&rxnet->call_lock);
	list_add_tail_rcu(&call->link, &rxnet->calls);
	spin_unlock_bh(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
			 refcount_read(&call->ref), here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, -EEXIST);
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 refcount_read(&call->ref), here, ERR_PTR(-EEXIST));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, completing the call queues it for
	 * recvmsg(), which we might now race with.  Return 0 from
	 * sys_sendmsg() and leave the error for recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 refcount_read(&call->ref), here, ERR_PTR(ret));
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id		= sp->hdr.callNumber;
	call->service_id	= sp->hdr.serviceId;
	call->cid		= sp->hdr.cid;
	call->state		= RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp	= skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (!__refcount_inc_not_zero(&call->ref, &n))
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = refcount_read(&call->ref);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = refcount_read(&call->ref);

		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
				 here, NULL);
	}
}

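/*
 * Attempt to get a ref on a call, failing if the refcount has already reached
 * zero (i.e. the call is being destroyed).
 */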
bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (!__refcount_inc_not_zero(&call->ref, &n))
		return false;
	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
	return true;
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n;

	__refcount_inc(&call->ref, &n);
	trace_rxrpc_call(call->debug_id, op, n + 1, here, NULL);
}

/*
 * Clean up the Rx skb queues.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	skb_queue_purge(&call->recvmsg_queue);
	skb_queue_purge(&call->rx_oos_queue);
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;

	_enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));

	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
			 refcount_read(&call->ref),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();

	rxrpc_put_call_slot(call);
	rxrpc_delete_call_timer(call);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		rxrpc_disconnect_call(call);
	if (call->security)
		call->security->free_call_crypto(call);
	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * drop a ref on a call, cleaning the call up if this was the last ref
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet = call->rxnet;
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = call->debug_id;
	bool dead;
	int n;

	ASSERT(call != NULL);

	dead = __refcount_dec_and_test(&call->ref, &n);
	trace_rxrpc_call(debug_id, op, n, here, NULL);
	if (dead) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			spin_lock_bh(&rxnet->call_lock);
			list_del_init(&call->link);
			spin_unlock_bh(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
	struct rxrpc_net *rxnet = call->rxnet;

	rxrpc_delete_call_timer(call);

	rxrpc_put_connection(call->conn);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

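	/* Destruction must happen in process context (see the comment on
	 * rxrpc_destroy_call()), so if the RCU callback runs in softirq,
	 * punt the work to a workqueue, reusing the now-idle work item.
	 */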
	if (in_softirq()) {
		INIT_WORK(&call->processor, rxrpc_destroy_call);
		if (!rxrpc_queue_work(&call->processor))
			BUG();
	} else {
		rxrpc_destroy_call(&call->processor);
	}
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	struct rxrpc_txbuf *txb;

	_net("DESTROY CALL %d", call->debug_id);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	rxrpc_cleanup_ring(call);
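	/* Drain any transmission buffers still attached to the call. */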
	while ((txb = list_first_entry_or_null(&call->tx_buffer,
					       struct rxrpc_txbuf, call_link))) {
		list_del(&txb->call_link);
		rxrpc_put_txbuf(txb, rxrpc_txbuf_put_cleaned);
	}
	rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
	rxrpc_free_skb(call->acks_soft_tbl, rxrpc_skb_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		spin_lock_bh(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, refcount_read(&call->ref),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);

			spin_unlock_bh(&rxnet->call_lock);
			cond_resched();
			spin_lock_bh(&rxnet->call_lock);
		}

		spin_unlock_bh(&rxnet->call_lock);
	}

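	/* Drop what is presumably the initial count that nr_calls was seeded
	 * with when the namespace was set up, then wait for the remaining
	 * calls to be destroyed.
	 */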
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}
729