// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016, 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ar-internal.h"

static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
				      struct sockaddr_rxrpc *peer_srx,
				      struct sk_buff *skb);

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * [!] Note that as this is called from the encap_rcv hook, the socket is not
 * held locked by the caller and nothing prevents sk_user_data on the UDP from
 * being cleared in the middle of processing this function.
 *
 * Called with the RCU read lock held from the IP layer via UDP.
 */
int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
{
	struct sk_buff_head *rx_queue;
	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
	struct task_struct *io_thread;

	if (unlikely(!local)) {
		kfree_skb(skb);
		return 0;
	}
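	/* If the I/O thread isn't running, the local endpoint is being shut
	 * down and there is nowhere to hand the packet, so drop it.
	 */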
	io_thread = READ_ONCE(local->io_thread);
	if (!io_thread) {
		kfree_skb(skb);
		return 0;
	}
	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();

	skb->mark = RXRPC_SKB_MARK_PACKET;
	rxrpc_new_skb(skb, rxrpc_skb_new_encap_rcv);
	rx_queue = &local->rx_queue;
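	/* Debugging aid: if Rx delay injection is set, or the delay queue
	 * already holds packets (to preserve ordering), divert the packet
	 * onto the delay queue, using skb->tstamp as its release time.
	 */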
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
	if (rxrpc_inject_rx_delay ||
	    !skb_queue_empty(&local->rx_delay_queue)) {
		skb->tstamp = ktime_add_ms(skb->tstamp, rxrpc_inject_rx_delay);
		rx_queue = &local->rx_delay_queue;
	}
#endif

	skb_queue_tail(rx_queue, skb);
	wake_up_process(io_thread);
	return 0;
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct rxrpc_local *local;
	struct sk_buff *skb;

	rcu_read_lock();
	local = rcu_dereference_sk_user_data(sk);
	if (unlikely(!local)) {
		rcu_read_unlock();
		return;
	}

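	/* Drain the UDP socket's error queue, tagging each report for the
	 * I/O thread to process.
	 */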
	while ((skb = skb_dequeue(&sk->sk_error_queue))) {
		skb->mark = RXRPC_SKB_MARK_ERROR;
		rxrpc_new_skb(skb, rxrpc_skb_new_error_report);
		skb_queue_tail(&local->rx_queue, skb);
	}

	rxrpc_wake_up_io_thread(local);
	rcu_read_unlock();
}

/*
 * Directly produce an abort from a packet.
 */
bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
			s32 abort_code, int err)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  abort_code, err);
	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
	skb->priority = abort_code;
	return false;
}

/*
 * Directly produce a connection abort from a packet.
 */
bool rxrpc_direct_conn_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
			     s32 abort_code, int err)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_abort(0, why, sp->hdr.cid, 0, sp->hdr.seq, abort_code, err);
	skb->mark = RXRPC_SKB_MARK_REJECT_CONN_ABORT;
	skb->priority = abort_code;
	return false;
}

static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why)
{
	return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG);
}

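/* The packet-input routines return true when the packet has been consumed or
 * should simply be discarded, and false when a rejection (abort) should be
 * transmitted back to the peer.
 */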
#define just_discard true

/*
 * Process event packets targeted at a local endpoint.
 */
static bool rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	char v;

	_enter("");

	rxrpc_see_skb(skb, rxrpc_skb_see_version);
	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), &v, 1) >= 0) {
		if (v == 0)
			rxrpc_send_version_request(local, &sp->hdr, skb);
	}

	return true;
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
				 struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;
	struct rxrpc_ackpacket ack;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
		return rxrpc_bad_message(skb, rxrpc_badmsg_short_hdr);

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch = ntohl(whdr.epoch);
	sp->hdr.cid = ntohl(whdr.cid);
	sp->hdr.callNumber = ntohl(whdr.callNumber);
	sp->hdr.seq = ntohl(whdr.seq);
	sp->hdr.serial = ntohl(whdr.serial);
	sp->hdr.flags = whdr.flags;
	sp->hdr.type = whdr.type;
	sp->hdr.userStatus = whdr.userStatus;
	sp->hdr.securityIndex = whdr.securityIndex;
	sp->hdr._rsvd = ntohs(whdr._rsvd);
	sp->hdr.serviceId = ntohs(whdr.serviceId);

	if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
		if (skb_copy_bits(skb, sizeof(whdr), &ack, sizeof(ack)) < 0)
			return rxrpc_bad_message(skb, rxrpc_badmsg_short_ack);
		sp->ack.first_ack = ntohl(ack.firstPacket);
		sp->ack.prev_ack = ntohl(ack.previousPacket);
		sp->ack.acked_serial = ntohl(ack.serial);
		sp->ack.reason = ack.reason;
		sp->ack.nr_acks = ack.nAcks;
	}
	return true;
}

/*
 * Extract the abort code from an ABORT packet and stash it in skb->priority.
 */
static bool rxrpc_extract_abort(struct sk_buff *skb)
{
	__be32 wtmp;

	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  &wtmp, sizeof(wtmp)) < 0)
		return false;
	skb->priority = ntohl(wtmp);
	return true;
}

/*
 * Process packets received on the local endpoint
 */
static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct sockaddr_rxrpc peer_srx;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_peer *peer = NULL;
	bool ret = false;

	skb_pull(skb, sizeof(struct udphdr));

	sp = rxrpc_skb(skb);

	/* dig out the RxRPC connection details */
	if (!rxrpc_extract_header(sp, skb))
		return just_discard;

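	/* Debugging aid: when loss injection is enabled, silently drop every
	 * eighth packet.
	 */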
	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
			return just_discard;
		}
	}

	trace_rxrpc_rx_packet(sp);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_VERSION:
		if (rxrpc_to_client(sp))
			return just_discard;
		return rxrpc_input_version(local, skb);

	case RXRPC_PACKET_TYPE_BUSY:
		if (rxrpc_to_server(sp))
			return just_discard;
		fallthrough;
	case RXRPC_PACKET_TYPE_ACK:
	case RXRPC_PACKET_TYPE_ACKALL:
		if (sp->hdr.callNumber == 0)
			return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
		break;
	case RXRPC_PACKET_TYPE_ABORT:
		if (!rxrpc_extract_abort(skb))
			return just_discard; /* Just discard if malformed */
		break;

	case RXRPC_PACKET_TYPE_DATA:
		if (sp->hdr.callNumber == 0)
			return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
		if (sp->hdr.seq == 0)
			return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
		break;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		if (rxrpc_to_server(sp))
			return just_discard;
		break;
	case RXRPC_PACKET_TYPE_RESPONSE:
		if (rxrpc_to_client(sp))
			return just_discard;
		break;

		/* Packet types 9-11 should just be ignored. */
	case RXRPC_PACKET_TYPE_PARAMS:
	case RXRPC_PACKET_TYPE_10:
	case RXRPC_PACKET_TYPE_11:
		return just_discard;

	default:
		return rxrpc_bad_message(skb, rxrpc_badmsg_unsupported_packet);
	}

	if (sp->hdr.serviceId == 0)
		return rxrpc_bad_message(skb, rxrpc_badmsg_zero_service);

	if (WARN_ON_ONCE(rxrpc_extract_addr_from_skb(&peer_srx, skb) < 0))
		return just_discard; /* Unsupported address type. */

	if (peer_srx.transport.family != local->srx.transport.family &&
	    (peer_srx.transport.family == AF_INET &&
	     local->srx.transport.family != AF_INET6)) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    peer_srx.transport.family,
				    local->srx.transport.family);
		return just_discard; /* Wrong address type. */
	}

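	/* Packets directed to a client call are routed via the client
	 * connection looked up from the packet header.
	 */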
	if (rxrpc_to_client(sp)) {
		rcu_read_lock();
		conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb);
		conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
		rcu_read_unlock();
		if (!conn)
			return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn);

		ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
		rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
		return ret;
	}

	/* We need to look up service connections by the full protocol
	 * parameter set.  We look up the peer first as an intermediate step
	 * and then the connection from the peer's tree.
	 */
	rcu_read_lock();

	peer = rxrpc_lookup_peer_rcu(local, &peer_srx);
	if (!peer) {
		rcu_read_unlock();
		return rxrpc_new_incoming_call(local, NULL, NULL, &peer_srx, skb);
	}

	conn = rxrpc_find_service_conn_rcu(peer, skb);
	conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
	if (conn) {
		rcu_read_unlock();
		ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
		rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
		return ret;
	}

	peer = rxrpc_get_peer_maybe(peer, rxrpc_peer_get_input);
	rcu_read_unlock();

	ret = rxrpc_new_incoming_call(local, peer, NULL, &peer_srx, skb);
	rxrpc_put_peer(peer, rxrpc_peer_put_input);
	return ret;
}

/*
 * Deal with a packet that's associated with an extant connection.
 */
static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
				      struct sockaddr_rxrpc *peer_srx,
				      struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_channel *chan;
	struct rxrpc_call *call = NULL;
	unsigned int channel;

	if (sp->hdr.securityIndex != conn->security_ix)
		return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security,
					  RXKADINCONSISTENCY, -EBADMSG);

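	/* If the connection was set up to probe for a service upgrade, the
	 * reply may carry the upgraded service ID; record it atomically so
	 * that only one value can ever be installed.
	 */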
	if (sp->hdr.serviceId != conn->service_id) {
		int old_id;

		if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
			return rxrpc_protocol_error(skb, rxrpc_eproto_reupgrade);

		old_id = cmpxchg(&conn->service_id, conn->orig_service_id,
				 sp->hdr.serviceId);
		if (old_id != conn->orig_service_id &&
		    old_id != sp->hdr.serviceId)
			return rxrpc_protocol_error(skb, rxrpc_eproto_bad_upgrade);
	}

	if (after(sp->hdr.serial, conn->hi_serial))
		conn->hi_serial = sp->hdr.serial;

	/* It's a connection-level packet if the call number is 0. */
	if (sp->hdr.callNumber == 0)
		return rxrpc_input_conn_packet(conn, skb);

	/* Deal with path MTU discovery probing. */
	if (sp->hdr.type == RXRPC_PACKET_TYPE_ACK &&
	    conn->pmtud_probe &&
	    after_eq(sp->ack.acked_serial, conn->pmtud_probe))
		rxrpc_input_probe_for_pmtud(conn, sp->ack.acked_serial, false);

	/* Call-bound packets are routed by connection channel. */
	channel = sp->hdr.cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];

	/* Ignore really old calls */
	if (sp->hdr.callNumber < chan->last_call)
		return just_discard;

	if (sp->hdr.callNumber == chan->last_call) {
		if (chan->call ||
		    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
			return just_discard;

		/* For the previous service call, if completed successfully, we
		 * discard all further packets.
		 */
		if (rxrpc_conn_is_service(conn) &&
		    chan->last_type == RXRPC_PACKET_TYPE_ACK)
			return just_discard;

		/* But otherwise we need to retransmit the final packet from
		 * data cached in the connection record.
		 */
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
			trace_rxrpc_rx_data(chan->call_debug_id,
					    sp->hdr.seq,
					    sp->hdr.serial,
					    sp->hdr.flags);
		rxrpc_conn_retransmit_call(conn, skb, channel);
		return just_discard;
	}

	call = rxrpc_try_get_call(chan->call, rxrpc_call_get_input);

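	/* A call number above the one recorded on the channel means the peer
	 * has moved on to a new call: a service implicitly ends the old call,
	 * whereas a client should never see this.
	 */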
	if (sp->hdr.callNumber > chan->call_id) {
		if (rxrpc_to_client(sp)) {
			if (call)
				rxrpc_put_call(call, rxrpc_call_put_input);
			return rxrpc_protocol_error(skb,
						    rxrpc_eproto_unexpected_implicit_end);
		}

		if (call) {
			rxrpc_implicit_end_call(call, skb);
			rxrpc_put_call(call, rxrpc_call_put_input);
			call = NULL;
		}
	}

	if (!call) {
		if (rxrpc_to_client(sp))
			return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_call);
		return rxrpc_new_incoming_call(conn->local, conn->peer, conn,
					       peer_srx, skb);
	}

	rxrpc_queue_rx_call_packet(call, skb);
	rxrpc_put_call(call, rxrpc_call_put_input);
	return true;
}

/*
 * I/O and event handling thread.
 */
int rxrpc_io_thread(void *data)
{
	struct rxrpc_connection *conn;
	struct sk_buff_head rx_queue;
	struct rxrpc_local *local = data;
	struct rxrpc_call *call;
	struct sk_buff *skb;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
	ktime_t now;
#endif
	bool should_stop;
	LIST_HEAD(conn_attend_q);
	LIST_HEAD(call_attend_q);

	complete(&local->io_thread_ready);

	skb_queue_head_init(&rx_queue);

	set_user_nice(current, MIN_NICE);

	for (;;) {
		rxrpc_inc_stat(local->rxnet, stat_io_loop);

		/* Inject a delay into packets if requested. */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
		now = ktime_get_real();
		while ((skb = skb_peek(&local->rx_delay_queue))) {
			if (ktime_before(now, skb->tstamp))
				break;
			skb = skb_dequeue(&local->rx_delay_queue);
			skb_queue_tail(&local->rx_queue, skb);
		}
#endif

		if (!skb_queue_empty(&local->rx_queue)) {
			spin_lock_irq(&local->rx_queue.lock);
			skb_queue_splice_tail_init(&local->rx_queue, &rx_queue);
			spin_unlock_irq(&local->rx_queue.lock);
			trace_rxrpc_iothread_rx(local, skb_queue_len(&rx_queue));
		}

		/* Distribute packets and errors. */
		while ((skb = __skb_dequeue(&rx_queue))) {
			struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
			switch (skb->mark) {
			case RXRPC_SKB_MARK_PACKET:
				skb->priority = 0;
				if (!rxrpc_input_packet(local, skb))
					rxrpc_reject_packet(local, skb);
				trace_rxrpc_rx_done(skb->mark, skb->priority);
				rxrpc_free_skb(skb, rxrpc_skb_put_input);
				break;
			case RXRPC_SKB_MARK_ERROR:
				rxrpc_input_error(local, skb);
				rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
				break;
			case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
				rxrpc_input_conn_event(sp->poke_conn, skb);
				rxrpc_put_connection(sp->poke_conn, rxrpc_conn_put_poke);
				rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured);
				break;
			default:
				WARN_ON_ONCE(1);
				rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
				break;
			}
		}

		/* Deal with connections that want immediate attention. */
		if (!list_empty_careful(&local->conn_attend_q)) {
			spin_lock_irq(&local->lock);
			list_splice_tail_init(&local->conn_attend_q, &conn_attend_q);
			spin_unlock_irq(&local->lock);
		}

		while ((conn = list_first_entry_or_null(&conn_attend_q,
							struct rxrpc_connection,
							attend_link))) {
			spin_lock_irq(&local->lock);
			list_del_init(&conn->attend_link);
			spin_unlock_irq(&local->lock);
			rxrpc_input_conn_event(conn, NULL);
			rxrpc_put_connection(conn, rxrpc_conn_put_poke);
		}

		if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
				       &local->client_conn_flags))
			rxrpc_discard_expired_client_conns(local);

		/* Deal with calls that want immediate attention. */
		spin_lock_irq(&local->lock);
		list_splice_tail_init(&local->call_attend_q, &call_attend_q);
		spin_unlock_irq(&local->lock);

		while ((call = list_first_entry_or_null(&call_attend_q,
							struct rxrpc_call,
							attend_link))) {
			spin_lock_irq(&local->lock);
			list_del_init(&call->attend_link);
			spin_unlock_irq(&local->lock);
			trace_rxrpc_call_poked(call);
			rxrpc_input_call_event(call);
			rxrpc_put_call(call, rxrpc_call_put_poke);
		}

		if (!list_empty(&local->new_client_calls))
			rxrpc_connect_client_calls(local);

		set_current_state(TASK_INTERRUPTIBLE);
		should_stop = kthread_should_stop();
		if (!skb_queue_empty(&local->rx_queue) ||
		    !list_empty(&local->call_attend_q) ||
		    !list_empty(&local->conn_attend_q) ||
		    !list_empty(&local->new_client_calls) ||
		    test_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
			     &local->client_conn_flags)) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (should_stop)
			break;

#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
		skb = skb_peek(&local->rx_delay_queue);
		if (skb) {
			unsigned long timeout;
			ktime_t tstamp = skb->tstamp;
			ktime_t now = ktime_get_real();
			s64 delay_ns = ktime_to_ns(ktime_sub(tstamp, now));

			if (delay_ns <= 0) {
				__set_current_state(TASK_RUNNING);
				continue;
			}

			timeout = nsecs_to_jiffies(delay_ns);
			timeout = umax(timeout, 1);
			schedule_timeout(timeout);
			__set_current_state(TASK_RUNNING);
			continue;
		}
#endif

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	rxrpc_see_local(local, rxrpc_local_stop);
	rxrpc_destroy_local(local);
	WRITE_ONCE(local->io_thread, NULL);
	rxrpc_see_local(local, rxrpc_local_stopped);
	return 0;
}