// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/udp.h>
#include "ar-internal.h"

extern int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);

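/*
 * Send a UDP packet directly via the appropriate address-family handler.
 *
 * The transport address in msg->msg_name selects the path: an AF_INET6
 * destination is only valid on an AF_INET6 local socket and is passed to
 * udpv6_sendmsg(); everything else is passed to udp_sendmsg().  Calling the
 * UDP handlers directly, rather than going through the generic socket
 * sendmsg path, presumably avoids per-packet socket-layer overhead.
 */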
static ssize_t do_udp_sendmsg(struct socket *socket, struct msghdr *msg, size_t len)
{
	struct sockaddr *sa = msg->msg_name;
	struct sock *sk = socket->sk;

	if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6)) {
		if (sa->sa_family == AF_INET6) {
			if (sk->sk_family != AF_INET6) {
				pr_warn("AF_INET6 address on AF_INET socket\n");
				return -ENOPROTOOPT;
			}
			return udpv6_sendmsg(sk, msg, len);
		}
	}
	return udp_sendmsg(sk, msg, len);
}

struct rxrpc_abort_buffer {
	struct rxrpc_wire_header whdr;
	__be32 abort_code;
};

static const char rxrpc_keepalive_string[] = "";

/*
 * Increase Tx backoff on transmission failure and clear it on success.
 */
static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
{
	if (ret < 0) {
		if (call->tx_backoff < 1000)
			call->tx_backoff += 100;
	} else {
		call->tx_backoff = 0;
	}
}

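/* A worked example for rxrpc_tx_backoff() above: each failed transmission
 * adds 100 to call->tx_backoff, so five consecutive failures give 500 and the
 * value saturates at 1000; the first successful transmission resets it to 0.
 * The accumulated value is presumably applied as extra delay by the call's
 * timer handling before the next transmission attempt.
 */
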
/*
 * Arrange for a keepalive ping a certain time after we last transmitted.  This
 * lets the far side know we're still interested in this call and helps keep
 * the route through any intervening firewall open.
 *
 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
 * expiring.
 */
static void rxrpc_set_keepalive(struct rxrpc_call *call, ktime_t now)
{
	ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6);

	call->keepalive_at = ktime_add(ktime_get_real(), delay);
	trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive);
}

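/* Illustrative example for rxrpc_set_keepalive() above: the keepalive ping is
 * scheduled at one sixth of the expected-receive timeout, so with a
 * hypothetical next_rx_timo of 12000ms the ping would be set for 2s after the
 * current time, early enough that a response arrives well before the
 * ->expect_rx_by timer can expire.
 */
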
/*
 * Fill out an ACK packet.
 */
static void rxrpc_fill_out_ack(struct rxrpc_call *call,
			       struct rxrpc_txbuf *txb,
			       u8 ack_reason,
			       rxrpc_serial_t serial)
{
	struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
	struct rxrpc_acktrailer *trailer = txb->kvec[2].iov_base + 3;
	struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
	unsigned int qsize, sack, wrap, to;
	rxrpc_seq_t window, wtop;
	int rsize;
	u32 mtu, jmax;
	u8 *filler = txb->kvec[2].iov_base;
	u8 *sackp = txb->kvec[1].iov_base;

	rxrpc_inc_stat(call->rxnet, stat_tx_ack_fill);

	window = call->ackr_window;
	wtop   = call->ackr_wtop;
	sack   = call->ackr_sack_base % RXRPC_SACK_SIZE;

	whdr->seq		= 0;
	whdr->type		= RXRPC_PACKET_TYPE_ACK;
	txb->flags		|= RXRPC_SLOW_START_OK;
	ack->bufferSpace	= 0;
	ack->maxSkew		= 0;
	ack->firstPacket	= htonl(window);
	ack->previousPacket	= htonl(call->rx_highest_seq);
	ack->serial		= htonl(serial);
	ack->reason		= ack_reason;
	ack->nAcks		= wtop - window;
	filler[0]		= 0;
	filler[1]		= 0;
	filler[2]		= 0;

	if (ack_reason == RXRPC_ACK_PING)
		txb->flags |= RXRPC_REQUEST_ACK;

	if (after(wtop, window)) {
		txb->len += ack->nAcks;
		txb->kvec[1].iov_base = sackp;
		txb->kvec[1].iov_len = ack->nAcks;

		wrap = RXRPC_SACK_SIZE - sack;
		to = min_t(unsigned int, ack->nAcks, RXRPC_SACK_SIZE);

		if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
			memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
		} else {
			memcpy(sackp, call->ackr_sack_table + sack, wrap);
			memcpy(sackp + wrap, call->ackr_sack_table, to - wrap);
		}
	} else if (before(wtop, window)) {
		pr_warn("ack window backward %x %x", window, wtop);
	} else if (ack->reason == RXRPC_ACK_DELAY) {
		ack->reason = RXRPC_ACK_IDLE;
	}

	mtu = call->peer->if_mtu;
	mtu -= call->peer->hdrsize;
	jmax = rxrpc_rx_jumbo_max;
	qsize = (window - 1) - call->rx_consumed;
	rsize = max_t(int, call->rx_winsize - qsize, 0);
	txb->ack_rwind = rsize;
	trailer->maxMTU		= htonl(rxrpc_rx_mtu);
	trailer->ifMTU		= htonl(mtu);
	trailer->rwind		= htonl(rsize);
	trailer->jumbo_max	= htonl(jmax);
}

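/* The txbuf layout assumed by rxrpc_fill_out_ack() above, as implied by the
 * pointer setup at the top of the function:
 *
 *	kvec[0]: wire header immediately followed by the ACK packet body
 *	kvec[1]: the soft-ACK table (one byte per packet in the ACK window)
 *	kvec[2]: three bytes of padding, then the ACK trailer
 *
 * kvec[1] is only added to txb->len when the ACK window is open (wtop is
 * after window), since nAcks is zero otherwise.
 */
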
/*
 * Record the beginning of an RTT probe.
 */
static void rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
				  ktime_t now, enum rxrpc_rtt_tx_trace why)
{
	unsigned long avail = call->rtt_avail;
	int rtt_slot = 9;

	if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
		goto no_slot;

	rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
	if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
		goto no_slot;

	call->rtt_serial[rtt_slot] = serial;
	call->rtt_sent_at[rtt_slot] = now;
	smp_wmb(); /* Write data before avail bit */
	set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);

	trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
	return;

no_slot:
	trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
}

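/* Note on the RTT slot tracking used by rxrpc_begin_rtt_probe() above: the
 * low bits of call->rtt_avail (RXRPC_CALL_RTT_AVAIL_MASK) mark free probe
 * slots and the corresponding PEND bits mark probes in flight.  A slot is
 * claimed with test_and_clear_bit(), the serial and timestamp are recorded,
 * and only then is the PEND bit set; the smp_wmb() orders those writes ahead
 * of the pending bit, as the in-line comment notes.  The initial rtt_slot
 * value of 9 is just an out-of-range marker for the no-slot trace event.
 */
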
/*
 * Transmit an ACK packet.
 */
static void rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
	struct rxrpc_connection *conn;
	struct rxrpc_ackpacket *ack = (struct rxrpc_ackpacket *)(whdr + 1);
	struct msghdr msg;
	ktime_t now;
	int ret;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= MSG_SPLICE_PAGES;

	whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;

	txb->serial = rxrpc_get_next_serial(conn);
	whdr->serial = htonl(txb->serial);
	trace_rxrpc_tx_ack(call->debug_id, txb->serial,
			   ntohl(ack->firstPacket),
			   ntohl(ack->serial), ack->reason, ack->nAcks,
			   txb->ack_rwind);

	rxrpc_inc_stat(call->rxnet, stat_tx_ack_send);

	iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, txb->len);
	rxrpc_local_dont_fragment(conn->local, false);
	ret = do_udp_sendmsg(conn->local->socket, &msg, txb->len);
	call->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0) {
		trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret,
				    rxrpc_tx_point_call_ack);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, whdr,
				      rxrpc_tx_point_call_ack);
		now = ktime_get_real();
		if (ack->reason == RXRPC_ACK_PING)
			rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_ping);
		if (txb->flags & RXRPC_REQUEST_ACK)
			call->peer->rtt_last_req = now;
		rxrpc_set_keepalive(call, now);
	}
	rxrpc_tx_backoff(call, ret);
}

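/* A successfully transmitted ACK does double duty above: a PING ACK starts an
 * RTT probe against its serial number, anything sent with RXRPC_REQUEST_ACK
 * refreshes peer->rtt_last_req, and every successful send re-arms the
 * keepalive timer.
 */
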
/*
 * Queue an ACK for immediate transmission.
 */
void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
		    rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why)
{
	struct rxrpc_txbuf *txb;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return;

	rxrpc_inc_stat(call->rxnet, stat_tx_acks[ack_reason]);

	txb = rxrpc_alloc_ack_txbuf(call, call->ackr_wtop - call->ackr_window);
	if (!txb) {
		kleave(" = -ENOMEM");
		return;
	}

	txb->ack_why = why;

	rxrpc_fill_out_ack(call, txb, ack_reason, serial);
	call->ackr_nr_unacked = 0;
	atomic_set(&call->ackr_nr_consumed, 0);
	clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags);

	trace_rxrpc_send_ack(call, why, ack_reason, serial);
	rxrpc_send_ack_packet(call, txb);
	rxrpc_put_txbuf(txb, rxrpc_txbuf_put_ack_tx);
}

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_abort_buffer pkt;
	struct msghdr msg;
	struct kvec iov[1];
	rxrpc_serial_t serial;
	int ret;

	/* Don't bother sending aborts for a client call once the server has
	 * hard-ACK'd all of its request data.  After that point, we're not
	 * going to stop the operation proceeding, and whilst we might limit
	 * the reply, it's not worth it if we can send a new call on the same
	 * channel instead, thereby closing off this call.
	 */
	if (rxrpc_is_client_call(call) &&
	    test_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags))
		return 0;

	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		return -ECONNRESET;

	conn = call->conn;

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	pkt.whdr.epoch		= htonl(conn->proto.epoch);
	pkt.whdr.cid		= htonl(call->cid);
	pkt.whdr.callNumber	= htonl(call->call_id);
	pkt.whdr.seq		= 0;
	pkt.whdr.type		= RXRPC_PACKET_TYPE_ABORT;
	pkt.whdr.flags		= conn->out_clientflag;
	pkt.whdr.userStatus	= 0;
	pkt.whdr.securityIndex	= call->security_ix;
	pkt.whdr._rsvd		= 0;
	pkt.whdr.serviceId	= htons(call->dest_srx.srx_service);
	pkt.abort_code		= htonl(call->abort_code);

	iov[0].iov_base	= &pkt;
	iov[0].iov_len	= sizeof(pkt);

	serial = rxrpc_get_next_serial(conn);
	pkt.whdr.serial = htonl(serial);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, sizeof(pkt));
	ret = do_udp_sendmsg(conn->local->socket, &msg, sizeof(pkt));
	conn->peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
				    rxrpc_tx_point_call_abort);
	else
		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_abort);
	rxrpc_tx_backoff(call, ret);
	return ret;
}

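/* On the wire, the call abort sent by rxrpc_send_abort_packet() above is just
 * struct rxrpc_abort_buffer: the standard wire header with type ABORT,
 * followed by the call's abort code as a 32-bit big-endian value, transmitted
 * as a single kvec.
 */
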
/*
 * Prepare a (sub)packet for transmission.
 */
static void rxrpc_prepare_data_subpacket(struct rxrpc_call *call, struct rxrpc_txbuf *txb,
					 rxrpc_serial_t serial)
{
	struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
	enum rxrpc_req_ack_trace why;
	struct rxrpc_connection *conn = call->conn;

	_enter("%x,{%d}", txb->seq, txb->len);

	txb->serial = serial;

	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) &&
	    txb->seq == 1)
		whdr->userStatus = RXRPC_USERSTATUS_SERVICE_UPGRADE;

	/* If our RTT cache needs working on, request an ACK.  Also request
	 * ACKs if a DATA packet appears to have been lost.
	 *
	 * However, we mustn't request an ACK on the last reply packet of a
	 * service call, lest OpenAFS incorrectly send us an ACK with some
	 * soft-ACKs in it and then never follow up with a proper hard ACK.
	 */
	if (txb->flags & RXRPC_REQUEST_ACK)
		why = rxrpc_reqack_already_on;
	else if ((txb->flags & RXRPC_LAST_PACKET) && rxrpc_sending_to_client(txb))
		why = rxrpc_reqack_no_srv_last;
	else if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events))
		why = rxrpc_reqack_ack_lost;
	else if (txb->flags & RXRPC_TXBUF_RESENT)
		why = rxrpc_reqack_retrans;
	else if (call->cong_mode == RXRPC_CALL_SLOW_START && call->cong_cwnd <= 2)
		why = rxrpc_reqack_slow_start;
	else if (call->tx_winsize <= 2)
		why = rxrpc_reqack_small_txwin;
	else if (call->peer->rtt_count < 3 && txb->seq & 1)
		why = rxrpc_reqack_more_rtt;
	else if (ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), ktime_get_real()))
		why = rxrpc_reqack_old_rtt;
	else
		goto dont_set_request_ack;

	rxrpc_inc_stat(call->rxnet, stat_why_req_ack[why]);
	trace_rxrpc_req_ack(call->debug_id, txb->seq, why);
	if (why != rxrpc_reqack_no_srv_last)
		txb->flags |= RXRPC_REQUEST_ACK;
dont_set_request_ack:

	whdr->flags = txb->flags & RXRPC_TXBUF_WIRE_FLAGS;
	whdr->serial	= htonl(txb->serial);
	whdr->cksum	= txb->cksum;

	trace_rxrpc_tx_data(call, txb->seq, txb->serial, txb->flags, false);
}

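/* In the request-ACK decision ladder above, the first matching reason wins
 * and is counted in the stats; rxrpc_reqack_no_srv_last is special in that it
 * is recorded and traced but deliberately does not set RXRPC_REQUEST_ACK, per
 * the OpenAFS interoperability note.
 */
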
/*
 * Prepare a packet for transmission.
 */
static size_t rxrpc_prepare_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	rxrpc_serial_t serial;

	/* Each transmission of a Tx packet needs a new serial number */
	serial = rxrpc_get_next_serial(call->conn);

	rxrpc_prepare_data_subpacket(call, txb, serial);

	return txb->len;
}

/*
 * Set timeouts after transmitting a packet.
 */
static void rxrpc_tstamp_data_packets(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	ktime_t now = ktime_get_real();
	bool ack_requested = txb->flags & RXRPC_REQUEST_ACK;

	call->tx_last_sent = now;
	txb->last_sent = now;

	if (ack_requested) {
		rxrpc_begin_rtt_probe(call, txb->serial, now, rxrpc_rtt_tx_data);

		call->peer->rtt_last_req = now;
		if (call->peer->rtt_count > 1) {
			ktime_t delay = rxrpc_get_rto_backoff(call->peer, false);

			call->ack_lost_at = ktime_add(now, delay);
			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_lost_ack);
		}
	}

	if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags)) {
		ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo));

		call->expect_rx_by = ktime_add(now, delay);
		trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
	}

	rxrpc_set_keepalive(call, now);
}

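/* rxrpc_tstamp_data_packets() above arms up to three timers per transmission:
 * ack_lost_at (RTO-based, only once more than one RTT sample has been
 * gathered) when an ACK was requested, expect_rx_by the first time the call
 * transmits, and the keepalive ping timer on every transmission.
 */
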
/*
 * send a packet through the transport endpoint
 */
static int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	struct rxrpc_wire_header *whdr = txb->kvec[0].iov_base;
	struct rxrpc_connection *conn = call->conn;
	enum rxrpc_tx_point frag;
	struct msghdr msg;
	size_t len;
	int ret;

	_enter("%x,{%d}", txb->seq, txb->len);

	len = rxrpc_prepare_data_packet(call, txb);

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			ret = 0;
			trace_rxrpc_tx_data(call, txb->seq, txb->serial,
					    txb->flags, true);
			goto done;
		}
	}

	iov_iter_kvec(&msg.msg_iter, WRITE, txb->kvec, txb->nr_kvec, len);

	msg.msg_name	= &call->peer->srx.transport;
	msg.msg_namelen	= call->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= MSG_SPLICE_PAGES;

	/* Track what we've attempted to transmit at least once so that the
	 * retransmission algorithm doesn't try to resend what we haven't sent
	 * yet.
	 */
	if (txb->seq == call->tx_transmitted + 1)
		call->tx_transmitted = txb->seq;

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (txb->len >= call->peer->maxdata) {
		rxrpc_local_dont_fragment(conn->local, false);
		frag = rxrpc_tx_point_call_data_frag;
	} else {
		rxrpc_local_dont_fragment(conn->local, true);
		frag = rxrpc_tx_point_call_data_nofrag;
	}

retry:
	/* send the packet by UDP
	 * - returns -EMSGSIZE if UDP would have to fragment the packet
	 *   to go out of the interface
	 *   - in which case, we'll have processed the ICMP error
	 *     message and update the peer record
	 */
	rxrpc_inc_stat(call->rxnet, stat_tx_data_send);
	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
	conn->peer->last_tx_at = ktime_get_seconds();

	if (ret < 0) {
		rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
		trace_rxrpc_tx_fail(call->debug_id, txb->serial, ret, frag);
	} else {
		trace_rxrpc_tx_packet(call->debug_id, whdr, frag);
	}

	rxrpc_tx_backoff(call, ret);
	if (ret == -EMSGSIZE && frag == rxrpc_tx_point_call_data_nofrag) {
		rxrpc_local_dont_fragment(conn->local, false);
		frag = rxrpc_tx_point_call_data_frag;
		goto retry;
	}

done:
	if (ret >= 0) {
		rxrpc_tstamp_data_packets(call, txb);
	} else {
		/* Cancel the call if the initial transmission fails,
		 * particularly if that's due to network routing issues that
		 * aren't going away anytime soon.  The layer above can arrange
		 * the retransmission.
		 */
		if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  RX_USER_ABORT, ret);
	}

	_leave(" = %d [%u]", ret, call->peer->maxdata);
	return ret;
}

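/* Fragmentation handling in rxrpc_send_data_packet() above, illustrated: a
 * packet that looks small enough for the path (txb->len < peer->maxdata) is
 * first sent with the don't-fragment bit set; if UDP rejects it with
 * -EMSGSIZE, the DF bit is cleared and the same packet is retried once with
 * fragmentation permitted.  Larger packets go straight to the fragmentable
 * path and are never retried this way.
 */
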
/*
 * Transmit a connection-level abort.
 */
void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	__be32 word;
	size_t len;
	u32 serial;
	int ret;

	msg.msg_name	= &conn->peer->srx.transport;
	msg.msg_namelen	= conn->peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	whdr.epoch	= htonl(conn->proto.epoch);
	whdr.cid	= htonl(conn->proto.cid);
	whdr.callNumber	= 0;
	whdr.seq	= 0;
	whdr.type	= RXRPC_PACKET_TYPE_ABORT;
	whdr.flags	= conn->out_clientflag;
	whdr.userStatus	= 0;
	whdr.securityIndex = conn->security_ix;
	whdr._rsvd	= 0;
	whdr.serviceId	= htons(conn->service_id);

	word		= htonl(conn->abort_code);

	iov[0].iov_base	= &whdr;
	iov[0].iov_len	= sizeof(whdr);
	iov[1].iov_base	= &word;
	iov[1].iov_len	= sizeof(word);

	len = iov[0].iov_len + iov[1].iov_len;

	serial = rxrpc_get_next_serial(conn);
	whdr.serial = htonl(serial);

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
	ret = do_udp_sendmsg(conn->local->socket, &msg, len);
	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_conn_abort);
		_debug("sendmsg failed: %d", ret);
		return;
	}

	trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);

	conn->peer->last_tx_at = ktime_get_seconds();
}

/*
 * Reject a packet through the local endpoint.
 */
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;
	int ret, ioc;

	rxrpc_see_skb(skb, rxrpc_skb_see_reject);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);

	msg.msg_name = &srx.transport;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&whdr, 0, sizeof(whdr));

	switch (skb->mark) {
	case RXRPC_SKB_MARK_REJECT_BUSY:
		whdr.type = RXRPC_PACKET_TYPE_BUSY;
		size = sizeof(whdr);
		ioc = 1;
		break;
	case RXRPC_SKB_MARK_REJECT_ABORT:
		whdr.type = RXRPC_PACKET_TYPE_ABORT;
		code = htonl(skb->priority);
		size = sizeof(whdr) + sizeof(code);
		ioc = 2;
		break;
	default:
		return;
	}

	if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
		msg.msg_namelen = srx.transport_len;

		whdr.epoch	= htonl(sp->hdr.epoch);
		whdr.cid	= htonl(sp->hdr.cid);
		whdr.callNumber	= htonl(sp->hdr.callNumber);
		whdr.serviceId	= htons(sp->hdr.serviceId);
		whdr.flags	= sp->hdr.flags;
		whdr.flags	^= RXRPC_CLIENT_INITIATED;
		whdr.flags	&= RXRPC_CLIENT_INITIATED;

		iov_iter_kvec(&msg.msg_iter, WRITE, iov, ioc, size);
		ret = do_udp_sendmsg(local->socket, &msg, size);
		if (ret < 0)
			trace_rxrpc_tx_fail(local->debug_id, 0, ret,
					    rxrpc_tx_point_reject);
		else
			trace_rxrpc_tx_packet(local->debug_id, &whdr,
					      rxrpc_tx_point_reject);
	}
}

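/* The flag manipulation in rxrpc_reject_packet() above (XOR then AND with
 * RXRPC_CLIENT_INITIATED) takes the incoming packet's flags, flips the
 * client-initiated direction bit and discards everything else, so the
 * BUSY/ABORT reply travels in the opposite direction to the packet that
 * provoked it.
 */
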
/*
 * Send a VERSION reply to a peer as a keepalive.
 */
void rxrpc_send_keepalive(struct rxrpc_peer *peer)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	size_t len;
	int ret;

	_enter("");

	msg.msg_name	= &peer->srx.transport;
	msg.msg_namelen	= peer->srx.transport_len;
	msg.msg_control	= NULL;
	msg.msg_controllen = 0;
	msg.msg_flags	= 0;

	whdr.epoch	= htonl(peer->local->rxnet->epoch);
	whdr.cid	= 0;
	whdr.callNumber	= 0;
	whdr.seq	= 0;
	whdr.serial	= 0;
	whdr.type	= RXRPC_PACKET_TYPE_VERSION; /* Not client-initiated */
	whdr.flags	= RXRPC_LAST_PACKET;
	whdr.userStatus	= 0;
	whdr.securityIndex = 0;
	whdr._rsvd	= 0;
	whdr.serviceId	= 0;

	iov[0].iov_base	= &whdr;
	iov[0].iov_len	= sizeof(whdr);
	iov[1].iov_base	= (char *)rxrpc_keepalive_string;
	iov[1].iov_len	= sizeof(rxrpc_keepalive_string);

	len = iov[0].iov_len + iov[1].iov_len;

	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
	ret = do_udp_sendmsg(peer->local->socket, &msg, len);
	if (ret < 0)
		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
				    rxrpc_tx_point_version_keepalive);
	else
		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
				      rxrpc_tx_point_version_keepalive);

	peer->last_tx_at = ktime_get_seconds();
	_leave("");
}

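/* The VERSION keepalive built above carries sizeof(rxrpc_keepalive_string)
 * == 1 byte of payload (the NUL terminator of the empty string), so the whole
 * packet is just the wire header plus a single zero byte.
 */
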
/*
 * Schedule an instant Tx resend.
 */
static inline void rxrpc_instant_resend(struct rxrpc_call *call,
					struct rxrpc_txbuf *txb)
{
	if (!__rxrpc_call_is_complete(call))
		kdebug("resend");
}

/*
 * Transmit one packet.
 */
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
{
	int ret;

	ret = rxrpc_send_data_packet(call, txb);
	if (ret < 0) {
		switch (ret) {
		case -ENETUNREACH:
		case -EHOSTUNREACH:
		case -ECONNREFUSED:
			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
						  0, ret);
			break;
		default:
			_debug("need instant resend %d", ret);
			rxrpc_instant_resend(call, txb);
		}
	} else {
		ktime_t delay = ns_to_ktime(call->peer->rto_us * NSEC_PER_USEC);

		call->resend_at = ktime_add(ktime_get_real(), delay);
		trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_resend_tx);
	}
}