/* /linux/net/rxrpc/sendmsg.c (revision 905e46acd3272d04566fec49afbd7ad9e2ed9ae3) */
/* AF_RXRPC sendmsg() implementation.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
};
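
/*
 * The command to carry out is selected by SOL_RXRPC control messages attached
 * to the sendmsg() call (RXRPC_ABORT, RXRPC_ACCEPT); a message carrying no
 * such control item defaults to RXRPC_CMD_SEND_DATA.  See
 * rxrpc_sendmsg_cmsg() below.
 */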

/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the call's user access mutex, but not the socket lock
 */
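/*
 * For illustration only: with tx_hard_ack = 10, tx_top = 17, tx_winsize = 64,
 * cong_cwnd = 6 and cong_extra = 1, there are 7 packets in flight against a
 * limit of min(64, 6 + 1) = 7, so the loop below must sleep until a further
 * ACK advances tx_hard_ack (or the call completes or a signal arrives).
 */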
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%u,%u,%u}",
	       call->tx_hard_ack, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (call->tx_top - call->tx_hard_ack <
		    min_t(unsigned int, call->tx_winsize,
			  call->cong_cwnd + call->cong_extra))
			break;
		if (call->state >= RXRPC_CALL_COMPLETE) {
			ret = -call->error;
			break;
		}
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
		mutex_unlock(&call->user_mutex);
		*timeo = schedule_timeout(*timeo);
		if (mutex_lock_interruptible(&call->user_mutex) < 0) {
			ret = sock_intr_errno(*timeo);
			break;
		}
	}

	remove_wait_queue(&call->waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
{
	spin_lock_bh(&call->lock);

	if (call->state < RXRPC_CALL_COMPLETE) {
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS;
		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
			rxrpc_queue_call(call);
	}

	spin_unlock_bh(&call->lock);
}

/*
 * Queue a DATA packet for transmission, set the resend timeout and send the
 * packet immediately
 */
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       bool last)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t seq = sp->hdr.seq;
	int ret, ix;
	u8 annotation = RXRPC_TX_ANNO_UNACK;

	_net("queue skb %p [%d]", skb, seq);

	ASSERTCMP(seq, ==, call->tx_top + 1);

	if (last)
		annotation |= RXRPC_TX_ANNO_LAST;

	/* We have to set the timestamp before queueing as the retransmit
	 * algorithm can see the packet as soon as we queue it.
	 */
	skb->tstamp = ktime_get_real();

	ix = seq & RXRPC_RXTX_BUFF_MASK;
	rxrpc_get_skb(skb, rxrpc_skb_tx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	call->tx_top = seq;
	if (last)
		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_queue);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			call->ack_at = call->expire_at;
			if (call->ackr_reason == RXRPC_ACK_DELAY)
				call->ackr_reason = 0;
			__rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
					  ktime_get_real());
			if (!last)
				break;
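			/* Fall through - the reply is also complete */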
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}
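
	/* In summary, queueing data above drives these call-state changes:
	 *   CLIENT_SEND_REQUEST -> CLIENT_AWAIT_REPLY  (last packet queued)
	 *   SERVER_ACK_REQUEST  -> SERVER_SEND_REPLY   (reply data queued)
	 *   SERVER_SEND_REPLY   -> SERVER_AWAIT_ACK    (last packet queued)
	 */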

	if (seq == 1 && rxrpc_is_client_call(call))
		rxrpc_expose_client_call(call);

	ret = rxrpc_send_data_packet(call, skb, false);
	if (ret < 0) {
		_debug("need instant resend %d", ret);
		rxrpc_instant_resend(call, ix);
	} else {
		ktime_t now = ktime_get_real(), resend_at;

		resend_at = ktime_add_ms(now, rxrpc_resend_timeout);

		if (ktime_before(resend_at, call->resend_at)) {
			call->resend_at = resend_at;
			rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
		}
	}

	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave("");
}

/*
 * send data through a socket
 * - must be called in process context
 * - The caller holds the call user access mutex, but not the socket lock.
 */
static int rxrpc_send_data(struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, copied;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	more = msg->msg_flags & MSG_MORE;

	skb = call->tx_pending;
	call->tx_pending = NULL;
	rxrpc_see_skb(skb, rxrpc_skb_tx_seen);

	copied = 0;
	do {
		/* Check to see if there's a ping ACK to reply to. */
		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
			rxrpc_send_ack_packet(call, false);

		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			if (call->tx_top - call->tx_hard_ack >=
			    min_t(unsigned int, call->tx_winsize,
				  call->cong_cwnd + call->cong_extra)) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo);
				if (ret < 0)
					goto maybe_error;
			}

			max = RXRPC_JUMBO_DATALEN;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > msg_data_left(msg) && !more)
				chunk = msg_data_left(msg);

			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->security_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb, rxrpc_skb_tx_new);

			_debug("ALLOC SEND %p", skb);

			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->security_size);
			skb_reserve(skb, call->conn->security_size);
			skb->len += call->conn->security_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			       skb_headroom(skb),
			       skb_tailroom(skb),
			       skb_headlen(skb),
			       sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		if (msg_data_left(msg) > 0) {
			int copy = skb_tailroom(skb);
			ASSERTCMP(copy, >, 0);
			if (copy > msg_data_left(msg))
				copy = msg_data_left(msg);
			if (copy > sp->remain)
				copy = sp->remain;

			_debug("add");
			ret = skb_add_data(skb, &msg->msg_iter, copy);
			_debug("added");
			if (ret < 0)
				goto efault;
			sp->remain -= copy;
			skb->mark += copy;
			copied += copy;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state == RXRPC_CALL_COMPLETE)
			goto call_terminated;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 ||
		    (msg_data_left(msg) == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			uint32_t seq;
			size_t pad;

			/* pad out if we're using security */
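			/* The padding below rounds the secured payload up to a
			 * multiple of size_align.  For illustration only: with
			 * size_align = 8, security_size = 8 and 13 bytes of
			 * data, pad = (8 - (8 + 13)) & 7 = 3 zero bytes.
			 */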
			if (conn->security_ix) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					memset(skb_put(skb, pad), 0, pad);
			}

			seq = call->tx_top + 1;

			sp->hdr.seq	= seq;
			sp->hdr._rsvd	= 0;
			sp->hdr.flags	= conn->out_clientflag;

			if (msg_data_left(msg) == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (call->tx_top - call->tx_hard_ack <
				 call->tx_winsize)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = conn->security->secure_packet(
				call, skb, skb->mark, skb->head);
			if (ret < 0)
				goto out;

			rxrpc_queue_packet(call, skb, !msg_data_left(msg) && !more);
			skb = NULL;
		}
	} while (msg_data_left(msg) > 0);

success:
	ret = copied;
out:
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_terminated:
	rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	_leave(" = %d", -call->error);
	return -call->error;

maybe_error:
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}

/*
 * extract control messages from the sendmsg() control buffer
 */
static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
			      unsigned long *user_call_ID,
			      enum rxrpc_command *command,
			      u32 *abort_code,
			      bool *_exclusive)
{
	struct cmsghdr *cmsg;
	bool got_user_ID = false;
	int len;

	*command = RXRPC_CMD_SEND_DATA;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		len = cmsg->cmsg_len - sizeof(struct cmsghdr);
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				*user_call_ID = *(u32 *) CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				*user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			_debug("User Call ID %lx", *user_call_ID);
			got_user_ID = true;
			break;

		case RXRPC_ABORT:
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(*abort_code))
				return -EINVAL;
			*abort_code = *(unsigned int *) CMSG_DATA(cmsg);
			_debug("Abort %x", *abort_code);
			if (*abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_ACCEPT;
			if (len != 0)
				return -EINVAL;
			break;

		case RXRPC_EXCLUSIVE_CALL:
			*_exclusive = true;
			if (len != 0)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (!got_user_ID)
		return -EINVAL;
	_leave(" = 0");
	return 0;
}
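
/*
 * A minimal userspace sketch (buffer layout and call ID value are
 * illustrative only) of the control data this parser expects, binding the
 * message to user call ID 1:
 *
 *	unsigned long user_id = 1;
 *	char control[CMSG_SPACE(sizeof(user_id))];
 *	struct msghdr msg = {
 *		.msg_control	= control,
 *		.msg_controllen	= sizeof(control),
 *	};
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_RXRPC;
 *	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(user_id));
 *	memcpy(CMSG_DATA(cmsg), &user_id, sizeof(user_id));
 *
 * The payload to transmit then goes in the iovec as usual, with MSG_MORE set
 * if more data will follow for the same call.
 */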

/*
 * Create a new client call for sendmsg().
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the caller holds the call's user access mutex and
 *   will need to release it.
 */
static struct rxrpc_call *
rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
				  unsigned long user_call_ID, bool exclusive)
	__releases(&rx->sk.sk_lock.slock)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call *call;
	struct key *key;

	DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name);

	_enter("");

	if (!msg->msg_name) {
		release_sock(&rx->sk);
		return ERR_PTR(-EDESTADDRREQ);
	}

	key = rx->key;
	if (key && !rx->key->payload.data[0])
		key = NULL;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.key			= key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= rx->exclusive | exclusive;
	cp.service_id		= srx->srx_service;
	call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL);
	/* The socket is now unlocked */

	_leave(" = %p\n", call);
	return call;
}

/*
 * send a message forming part of a call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
	__releases(&rx->sk.sk_lock.slock)
{
	enum rxrpc_call_state state;
	enum rxrpc_command cmd;
	struct rxrpc_call *call;
	unsigned long user_call_ID = 0;
	bool exclusive = false;
	u32 abort_code = 0;
	int ret;

	_enter("");

	ret = rxrpc_sendmsg_cmsg(msg, &user_call_ID, &cmd, &abort_code,
				 &exclusive);
	if (ret < 0)
		goto error_release_sock;

	if (cmd == RXRPC_CMD_ACCEPT) {
		ret = -EINVAL;
		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
			goto error_release_sock;
		call = rxrpc_accept_call(rx, user_call_ID, NULL);
		/* The socket is now unlocked. */
		if (IS_ERR(call))
			return PTR_ERR(call);
		rxrpc_put_call(call, rxrpc_call_put);
		return 0;
	}

	call = rxrpc_find_call_by_user_ID(rx, user_call_ID);
	if (!call) {
		ret = -EBADSLT;
		if (cmd != RXRPC_CMD_SEND_DATA)
			goto error_release_sock;
		call = rxrpc_new_client_call_for_sendmsg(rx, msg, user_call_ID,
							 exclusive);
		/* The socket is now unlocked... */
		if (IS_ERR(call))
			return PTR_ERR(call);
		/* ... and we have the call lock. */
	} else {
		switch (READ_ONCE(call->state)) {
		case RXRPC_CALL_UNINITIALISED:
		case RXRPC_CALL_CLIENT_AWAIT_CONN:
		case RXRPC_CALL_SERVER_PREALLOC:
		case RXRPC_CALL_SERVER_SECURING:
		case RXRPC_CALL_SERVER_ACCEPTING:
			ret = -EBUSY;
			goto error_release_sock;
		default:
			break;
		}

		ret = mutex_lock_interruptible(&call->user_mutex);
		release_sock(&rx->sk);
		if (ret < 0) {
			ret = -ERESTARTSYS;
			goto error_put;
		}
	}

	state = READ_ONCE(call->state);
	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, state, call->conn);

	if (state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
		ret = 0;
		if (rxrpc_abort_call("CMD", call, 0, abort_code, -ECONNABORTED))
			ret = rxrpc_send_abort_packet(call);
	} else if (cmd != RXRPC_CMD_SEND_DATA) {
		ret = -EINVAL;
	} else if (rxrpc_is_client_call(call) &&
		   state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else if (rxrpc_is_service_call(call) &&
		   state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   state != RXRPC_CALL_SERVER_SEND_REPLY) {
		/* Reply phase not begun or not complete for service call. */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(rx, call, msg, len);
	}

	mutex_unlock(&call->user_mutex);
error_put:
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = %d", ret);
	return ret;

error_release_sock:
	release_sock(&rx->sk);
	return ret;
}

/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @sock: The socket the call is on
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 */
int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
			   struct msghdr *msg, size_t len)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	mutex_lock(&call->user_mutex);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_SERVER_ACK_REQUEST:
	case RXRPC_CALL_SERVER_SEND_REPLY:
		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len);
		break;
	case RXRPC_CALL_COMPLETE:
		read_lock_bh(&call->state_lock);
		ret = -call->error;
		read_unlock_bh(&call->state_lock);
		break;
	default:
		/* Request phase complete for this client call */
		trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
		ret = -EPROTO;
		break;
	}

	mutex_unlock(&call->user_mutex);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(rxrpc_kernel_send_data);
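
/*
 * A minimal sketch of how a kernel service might call this (the buffer,
 * length and completion flag below are illustrative only):
 *
 *	struct msghdr msg = {};
 *	struct kvec iov = { .iov_base = buf, .iov_len = len };
 *
 *	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, len);
 *	msg.msg_flags = last_fragment ? 0 : MSG_MORE;
 *	ret = rxrpc_kernel_send_data(sock, call, &msg, len);
 */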

/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @sock: The socket the call is on
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 * @error: Local error value
 * @why: 3-char string indicating why.
 *
 * Allow a kernel service to abort a call if it's still in an abortable state.
 * Returns true if the call was aborted and false if it was already complete.
 */
bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
			     u32 abort_code, int error, const char *why)
{
	bool aborted;

	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);

	mutex_lock(&call->user_mutex);

	aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
	if (aborted)
		rxrpc_send_abort_packet(call);

	mutex_unlock(&call->user_mutex);
	return aborted;
}
EXPORT_SYMBOL(rxrpc_kernel_abort_call);
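
/*
 * A minimal sketch of use by a kernel service (the abort code, local error
 * and three-letter reason tag are illustrative only):
 *
 *	rxrpc_kernel_abort_call(sock, call, RX_USER_ABORT, -ENOMEM, "KOO");
 */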
667