// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ar-internal.h"

static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}

/*
 * Do TCP-style congestion management [RFC 5681].
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	summary->flight_size =
		(call->tx_top - call->acks_hard_ack) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	/* If we haven't transmitted anything for >1RTT, we should reset the
	 * congestion management state.
	 */
	if ((call->cong_mode == RXRPC_CALL_SLOW_START ||
	     call->cong_mode == RXRPC_CALL_CONGEST_AVOIDANCE) &&
	    ktime_before(ktime_add_us(call->tx_last_sent,
				      call->peer->srtt_us >> 3),
			 ktime_get_real())
	    ) {
		change = rxrpc_cong_idle_reset;
		summary->mode = RXRPC_CALL_SLOW_START;
		if (RXRPC_TX_SMSS > 2190)
			summary->cwnd = 2;
		else if (RXRPC_TX_SMSS > 1095)
			summary->cwnd = 3;
		else
			summary->cwnd = 4;
	}

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->saw_nacks)
			goto packet_loss_detected;
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->saw_nacks)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_count == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_us(call->cong_tstamp,
					      call->peer->srtt_us >> 3)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (!summary->saw_nacks)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (!summary->saw_nacks)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	if (cwnd >= RXRPC_TX_MAX_WINDOW)
		cwnd = RXRPC_TX_MAX_WINDOW;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
	    summary->nr_acks != call->tx_top - call->acks_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}
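
/*
 * A rough illustration of the state machine above (numbers invented, not
 * taken from a trace): with ssthresh = 8, an unimpeded call would see cwnd
 * grow 1 -> 2 -> ... -> 8 in SLOW_START (+1 per ACK processed), then switch
 * to CONGEST_AVOIDANCE (+1 per RTT window that was filled).  The first NACK
 * moves it to PACKET_LOSS; three duplicate ACKs there trigger
 * FAST_RETRANSMIT with ssthresh = max(flight/2, 2) and cwnd = ssthresh + 3.
 * cwnd is capped at RXRPC_TX_MAX_WINDOW throughout, and an idle gap longer
 * than the smoothed RTT (srtt_us is stored x8, hence the >> 3) resets the
 * call to a fresh slow start with cwnd 2-4 depending on RXRPC_TX_SMSS.
 */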

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct rxrpc_txbuf *txb;
	bool rot_last = false;

	list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
		if (before_eq(txb->seq, call->acks_hard_ack))
			continue;
		summary->nr_rot_new_acks++;
		if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
			rot_last = true;
		}
		if (txb->seq == to)
			break;
	}

	if (rot_last)
		set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);

	_enter("%x,%x,%x,%d", to, call->acks_hard_ack, call->tx_top, rot_last);

	if (call->acks_lowest_nak == call->acks_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (after(to, call->acks_lowest_nak)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	smp_store_release(&call->acks_hard_ack, to);

	trace_rxrpc_txqueue(call, (rot_last ?
				   rxrpc_txqueue_rotate_last :
				   rxrpc_txqueue_rotate));
	wake_up(&call->waitq);
	return rot_last;
}

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{
	unsigned int state;

	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	state = call->state;
	switch (state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun)
			call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		__rxrpc_call_completed(call);
		state = call->state;
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
	else
		trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	unsigned long now, timo;
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		now = jiffies;
		timo = now + MAX_JIFFY_OFFSET;
		WRITE_ONCE(call->resend_at, timo);
		WRITE_ONCE(call->delay_ack_at, timo);
		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
			rxrpc_proto_abort("TXL", call, top);
			return false;
		}
	}
	return rxrpc_end_tx_phase(call, true, "ETD");
}

static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
					  rxrpc_seq_t window, rxrpc_seq_t wtop)
{
	atomic64_set_release(&call->ackr_window, ((u64)wtop) << 32 | window);
}
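
/*
 * For reference, ->ackr_window packs two 32-bit sequence numbers into a
 * single atomic64 so that they can be read and updated together:
 *
 *	bits 0-31:  window - the lowest sequence number not yet queued for
 *		    recvmsg (the start of the Rx window)
 *	bits 32-63: wtop - one beyond the highest sequence number received
 *
 * e.g. window = 5, wtop = 9 means #1-#4 have been queued in order, #5 is the
 * next packet wanted and at least #8 has arrived out of order.
 */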

/*
 * Push a DATA packet onto the Rx queue.
 */
static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
				   rxrpc_seq_t window, rxrpc_seq_t wtop,
				   enum rxrpc_receive_trace why)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;

	__skb_queue_tail(&call->recvmsg_queue, skb);
	rxrpc_input_update_ack_window(call, window, wtop);

	trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
}

/*
 * Process a DATA packet.
 */
static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sk_buff *oos;
	rxrpc_serial_t serial = sp->hdr.serial;
	u64 win = atomic64_read(&call->ackr_window);
	rxrpc_seq_t window = lower_32_bits(win);
	rxrpc_seq_t wtop = upper_32_bits(win);
	rxrpc_seq_t wlimit = window + call->rx_winsize - 1;
	rxrpc_seq_t seq = sp->hdr.seq;
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
	int ack_reason = -1;

	rxrpc_inc_stat(call->rxnet, stat_rx_data);
	if (sp->hdr.flags & RXRPC_REQUEST_ACK)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_reqack);
	if (sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_jumbo);

	if (last) {
		if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq + 1 != wtop) {
			rxrpc_proto_abort("LSN", call, seq);
			goto err_free;
		}
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, wtop)) {
			pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
				call->debug_id, seq, window, wtop, wlimit);
			rxrpc_proto_abort("LSA", call, seq);
			goto err_free;
		}
	}

	if (after(seq, call->rx_highest_seq))
		call->rx_highest_seq = seq;

	trace_rxrpc_rx_data(call->debug_id, seq, serial, sp->hdr.flags);

	if (before(seq, window)) {
		ack_reason = RXRPC_ACK_DUPLICATE;
		goto send_ack;
	}
	if (after(seq, wlimit)) {
		ack_reason = RXRPC_ACK_EXCEEDS_WINDOW;
		goto send_ack;
	}
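
	/* From here the packet is queued.  If it is the next one the window
	 * is waiting for (seq == window), it goes straight onto the recvmsg
	 * queue and any now-contiguous packets are drained from the
	 * out-of-order queue behind it: e.g. with window = 5 and #6 and #7
	 * already parked on rx_oos_queue, the arrival of #5 queues #5-#7 and
	 * advances window to 8.  Any other packet inside the window is parked
	 * on rx_oos_queue in sequence order and noted in the SACK table.
	 */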
	/* Queue the packet. */
	if (seq == window) {
		rxrpc_seq_t reset_from;
		bool reset_sack = false;

		if (sp->hdr.flags & RXRPC_REQUEST_ACK)
			ack_reason = RXRPC_ACK_REQUESTED;
		/* Send an immediate ACK if we fill in a hole */
		else if (!skb_queue_empty(&call->rx_oos_queue))
			ack_reason = RXRPC_ACK_DELAY;

		window++;
		if (after(window, wtop))
			wtop = window;

		spin_lock(&call->recvmsg_queue.lock);
		rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
		skb = NULL;

		while ((oos = skb_peek(&call->rx_oos_queue))) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, window))
				break;

			__skb_unlink(oos, &call->rx_oos_queue);
			last = osp->hdr.flags & RXRPC_LAST_PACKET;
			seq = osp->hdr.seq;
			if (!reset_sack) {
				reset_from = seq;
				reset_sack = true;
			}

			window++;
			rxrpc_input_queue_data(call, oos, window, wtop,
					       rxrpc_receive_queue_oos);
		}

		spin_unlock(&call->recvmsg_queue.lock);

		if (reset_sack) {
			do {
				call->ackr_sack_table[reset_from % RXRPC_SACK_SIZE] = 0;
			} while (reset_from++, before(reset_from, window));
		}
	} else {
		bool keep = false;

		ack_reason = RXRPC_ACK_OUT_OF_SEQUENCE;

		if (!call->ackr_sack_table[seq % RXRPC_SACK_SIZE]) {
			call->ackr_sack_table[seq % RXRPC_SACK_SIZE] = 1;
			keep = true;
		}

		if (after(seq + 1, wtop)) {
			wtop = seq + 1;
			rxrpc_input_update_ack_window(call, window, wtop);
		}

		if (!keep) {
			ack_reason = RXRPC_ACK_DUPLICATE;
			goto send_ack;
		}

		skb_queue_walk(&call->rx_oos_queue, oos) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, seq)) {
				__skb_queue_before(&call->rx_oos_queue, oos, skb);
				goto oos_queued;
			}
		}

		__skb_queue_tail(&call->rx_oos_queue, skb);
	oos_queued:
		trace_rxrpc_receive(call, last ? rxrpc_receive_oos_last : rxrpc_receive_oos,
				    sp->hdr.serial, sp->hdr.seq);
		skb = NULL;
	}

send_ack:
	if (ack_reason < 0 &&
	    atomic_inc_return(&call->ackr_nr_unacked) > 2 &&
	    test_and_set_bit(RXRPC_CALL_IDLE_ACK_PENDING, &call->flags)) {
		ack_reason = RXRPC_ACK_IDLE;
	} else if (ack_reason >= 0) {
		set_bit(RXRPC_CALL_IDLE_ACK_PENDING, &call->flags);
	}

	if (ack_reason >= 0)
		rxrpc_send_ACK(call, ack_reason, serial,
			       rxrpc_propose_ack_input_data);
	else
		rxrpc_propose_delay_ACK(call, serial,
					rxrpc_propose_ack_input_data);

err_free:
	rxrpc_free_skb(skb, rxrpc_skb_freed);
}
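
/*
 * A jumbo DATA packet carries several consecutive sub-packets: each full
 * sub-packet is RXRPC_JUMBO_DATALEN bytes of data followed by a small
 * secondary header (struct rxrpc_jumbo_header) describing the next one, and
 * only the final sub-packet may be shorter.  Roughly:
 *
 *	[wire header][data #n][jumbo hdr][data #n+1][jumbo hdr]...[data #last]
 *
 * The splitter below clones the skb once per full-sized sub-packet, points
 * each clone's private data at its slice, and bumps seq and serial by one
 * each time, consecutive numbering being implied by the jumbo format.
 */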

/*
 * Split a jumbo packet and file the bits separately.
 */
static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
	struct sk_buff *jskb;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len - offset;

	while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
		if (len < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (sp->hdr.flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		if (skb_copy_bits(skb, offset + RXRPC_JUMBO_DATALEN,
				  &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;

		jskb = skb_clone(skb, GFP_ATOMIC);
		if (!jskb) {
			kdebug("couldn't clone");
			return false;
		}
		rxrpc_new_skb(jskb, rxrpc_skb_cloned_jumbo);
		jsp = rxrpc_skb(jskb);
		jsp->offset = offset;
		jsp->len = RXRPC_JUMBO_DATALEN;
		rxrpc_input_data_one(call, jskb);

		sp->hdr.flags = jhdr.flags;
		sp->hdr._rsvd = ntohs(jhdr._rsvd);
		sp->hdr.seq++;
		sp->hdr.serial++;
		offset += RXRPC_JUMBO_SUBPKTLEN;
		len -= RXRPC_JUMBO_SUBPKTLEN;
	}

	sp->offset = offset;
	sp->len = len;
	rxrpc_input_data_one(call, skb);
	return true;

protocol_error:
	return false;
}

/*
 * Process a DATA packet, adding the packet to the Rx ring. The caller's
 * packet ref must be passed on or discarded.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	enum rxrpc_call_state state;
	rxrpc_serial_t serial = sp->hdr.serial;
	rxrpc_seq_t seq0 = sp->hdr.seq;

	_enter("{%llx,%x},{%u,%x}",
	       atomic64_read(&call->ackr_window), call->rx_highest_seq,
	       skb->len, seq0);

	_proto("Rx DATA %%%u { #%u f=%02x }",
	       sp->hdr.serial, seq0, sp->hdr.flags);

	state = READ_ONCE(call->state);
	if (state >= RXRPC_CALL_COMPLETE) {
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		return;
	}

	/* Unshare the packet so that it can be modified for in-place
	 * decryption.
	 */
	if (sp->hdr.securityIndex != 0) {
		struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
		if (!nskb) {
			rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
			return;
		}

		if (nskb != skb) {
			rxrpc_eaten_skb(skb, rxrpc_skb_received);
			skb = nskb;
			rxrpc_new_skb(skb, rxrpc_skb_unshared);
			sp = rxrpc_skb(skb);
		}
	}

	if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
		unsigned long timo = READ_ONCE(call->next_req_timo);
		unsigned long now, expect_req_by;

		if (timo) {
			now = jiffies;
			expect_req_by = now + timo;
			WRITE_ONCE(call->expect_req_by, expect_req_by);
			rxrpc_reduce_call_timer(call, expect_req_by, now,
						rxrpc_timer_set_for_idle);
		}
	}

	spin_lock(&call->input_lock);

	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
	    !rxrpc_receiving_reply(call))
		goto out;

	if (!rxrpc_input_split_jumbo(call, skb)) {
		rxrpc_proto_abort("VLD", call, sp->hdr.seq);
		goto out;
	}
	skb = NULL;

out:
	trace_rxrpc_notify_socket(call->debug_id, serial);
	rxrpc_notify_socket(call);

	spin_unlock(&call->input_lock);
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	_leave(" [queued]");
}
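
/*
 * A note on the RTT probe bookkeeping used below: ->rtt_avail is a small
 * bitmap with two bits per slot.  Bit i in the low bits means slot i is free
 * for a new probe; bit i + RXRPC_CALL_RTT_PEND_SHIFT means slot i holds a
 * probe still awaiting its answering ACK.  The sender fills
 * ->rtt_serial[i] and ->rtt_sent_at[i], sets the pending bit, and the
 * matcher below pairs an acked serial with its slot and feeds the elapsed
 * time into the peer's smoothed RTT.
 */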

/*
 * See if there's a cached RTT probe to complete.
 */
static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
				     ktime_t resp_time,
				     rxrpc_serial_t acked_serial,
				     rxrpc_serial_t ack_serial,
				     enum rxrpc_rtt_rx_trace type)
{
	rxrpc_serial_t orig_serial;
	unsigned long avail;
	ktime_t sent_at;
	bool matched = false;
	int i;

	avail = READ_ONCE(call->rtt_avail);
	smp_rmb(); /* Read avail bits before accessing data. */

	for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
		if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
			continue;

		sent_at = call->rtt_sent_at[i];
		orig_serial = call->rtt_serial[i];

		if (orig_serial == acked_serial) {
			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
			smp_mb(); /* Read data before setting avail bit */
			set_bit(i, &call->rtt_avail);
			if (type != rxrpc_rtt_rx_cancel)
				rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
						   sent_at, resp_time);
			else
				trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
						   orig_serial, acked_serial, 0, 0);
			matched = true;
		}

		/* If a later serial is being acked, then mark this slot as
		 * being available.
		 */
		if (after(acked_serial, orig_serial)) {
			trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
					   orig_serial, acked_serial, 0, 0);
			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
			smp_wmb();
			set_bit(i, &call->rtt_avail);
		}
	}

	if (!matched)
		trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
}

/*
 * Process the response to a ping that we sent to find out if we lost an ACK.
 *
 * If we got back a ping response that indicates a lower tx_top than what we
 * had at the time of the ping transmission, we adjudge all the DATA packets
 * sent between the response tx_top and the ping-time tx_top to have been lost.
 */
static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
{
	if (after(call->acks_lost_top, call->acks_prev_seq) &&
	    !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
}

/*
 * Process a ping response.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t acked_serial,
				      rxrpc_serial_t ack_serial)
{
	if (acked_serial == call->acks_lost_ping)
		rxrpc_input_check_for_lost_ack(call);
}

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				struct rxrpc_ackinfo *ackinfo)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;
	unsigned int mtu;
	bool wake = false;
	u32 rwind = ntohl(ackinfo->rwind);

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       sp->hdr.serial,
	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
	       rwind, ntohl(ackinfo->jumbo_max));

	if (rwind > RXRPC_TX_MAX_WINDOW)
		rwind = RXRPC_TX_MAX_WINDOW;
	if (call->tx_winsize != rwind) {
		if (rwind > call->tx_winsize)
			wake = true;
		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
		call->tx_winsize = rwind;
	}

	if (call->cong_ssthresh > rwind)
		call->cong_ssthresh = rwind;

	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));

	peer = call->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}

	if (wake)
		wake_up(&call->waitq);
}

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK. If we find an explicitly NAK'd packet we resend it immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
				  rxrpc_seq_t seq, int nr_acks,
				  struct rxrpc_ack_summary *summary)
{
	unsigned int i;

	for (i = 0; i < nr_acks; i++) {
		if (acks[i] == RXRPC_ACK_TYPE_ACK) {
			summary->nr_acks++;
			summary->nr_new_acks++;
		} else {
			if (!summary->saw_nacks &&
			    call->acks_lowest_nak != seq + i) {
				call->acks_lowest_nak = seq + i;
				summary->new_low_nack = true;
			}
			summary->saw_nacks = true;
		}
	}
}

/*
 * Return true if the ACK is valid - ie. it doesn't appear to have regressed
 * with respect to the ack state conveyed by preceding ACKs.
 */
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
			       rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
{
	rxrpc_seq_t base = READ_ONCE(call->acks_first_seq);

	if (after(first_pkt, base))
		return true; /* The window advanced */

	if (before(first_pkt, base))
		return false; /* firstPacket regressed */

	if (after_eq(prev_pkt, call->acks_prev_seq))
		return true; /* previousPacket hasn't regressed. */

	/* Some rx implementations put a serial number in previousPacket. */
	if (after_eq(prev_pkt, base + call->tx_winsize))
		return false;
	return true;
}
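
/*
 * Worked example of the check above (numbers invented): with a recorded
 * acks_first_seq of 5 and acks_prev_seq of 7, an ACK carrying firstPacket = 6
 * is accepted outright (the window advanced) and one carrying firstPacket = 4
 * is discarded (firstPacket regressed).  At firstPacket = 5, previousPacket
 * may lag the recorded 7 and still be accepted, provided it looks like a
 * sequence number near the window; a wildly larger value (tx_winsize or more
 * beyond the base) is taken to be a serial number that a broken peer stuffed
 * into previousPacket, and the ACK is discarded.
 */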

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array. Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested. A phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_ackpacket ack;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_ackinfo info;
	struct sk_buff *skb_old = NULL, *skb_put = skb;
	rxrpc_serial_t ack_serial, acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
	int nr_acks, offset, ioffset;

	_enter("");

	offset = sizeof(struct rxrpc_wire_header);
	if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0) {
		rxrpc_proto_abort("XAK", call, 0);
		goto out_not_locked;
	}
	offset += sizeof(ack);

	ack_serial = sp->hdr.serial;
	acked_serial = ntohl(ack.serial);
	first_soft_ack = ntohl(ack.firstPacket);
	prev_pkt = ntohl(ack.previousPacket);
	hard_ack = first_soft_ack - 1;
	nr_acks = ack.nAcks;
	summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
			      ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
			   first_soft_ack, prev_pkt,
			   summary.ack_reason, nr_acks);
	rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);

	switch (ack.reason) {
	case RXRPC_ACK_PING_RESPONSE:
		rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
					 rxrpc_rtt_rx_ping_response);
		break;
	case RXRPC_ACK_REQUESTED:
		rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
					 rxrpc_rtt_rx_requested_ack);
		break;
	default:
		if (acked_serial != 0)
			rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
						 rxrpc_rtt_rx_cancel);
		break;
	}

	if (ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", ack_serial);
		rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
			       rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
			       rxrpc_propose_ack_respond_to_ack);
	}

	/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
	 * indicates that the client address changed due to NAT. The server
	 * lost the call because it switched to a different peer.
	 */
	if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
	    first_soft_ack == 1 &&
	    prev_pkt == 0 &&
	    rxrpc_is_client_call(call)) {
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  0, -ENETRESET);
		return;
	}

	/* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
	 * indicate a change of address. However, we can retransmit the call
	 * if we still have it buffered to the beginning.
	 */
	if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
	    first_soft_ack == 1 &&
	    prev_pkt == 0 &&
	    call->acks_hard_ack == 0 &&
	    rxrpc_is_client_call(call)) {
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  0, -ENETRESET);
		return;
	}
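
	/* The validity check is run twice: once here without taking
	 * input_lock, to cheaply drop obviously stale ACKs, and again under
	 * the lock before the call state is modified, as another CPU may
	 * have processed a newer ACK in the meantime.
	 */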
	/* Discard any out-of-order or duplicate ACKs (outside lock). */
	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
					   first_soft_ack, call->acks_first_seq,
					   prev_pkt, call->acks_prev_seq);
		goto out_not_locked;
	}

	info.rxMTU = 0;
	ioffset = offset + nr_acks + 3;
	if (skb->len >= ioffset + sizeof(info) &&
	    skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0) {
		rxrpc_proto_abort("XAI", call, 0);
		goto out_not_locked;
	}

	if (nr_acks > 0)
		skb_condense(skb);

	spin_lock(&call->input_lock);

	/* Discard any out-of-order or duplicate ACKs (inside lock). */
	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
					   first_soft_ack, call->acks_first_seq,
					   prev_pkt, call->acks_prev_seq);
		goto out;
	}
	call->acks_latest_ts = skb->tstamp;

	call->acks_first_seq = first_soft_ack;
	call->acks_prev_seq = prev_pkt;

	switch (ack.reason) {
	case RXRPC_ACK_PING:
		break;
	case RXRPC_ACK_PING_RESPONSE:
		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
					  ack_serial);
		fallthrough;
	default:
		if (after(acked_serial, call->acks_highest_serial))
			call->acks_highest_serial = acked_serial;
		break;
	}

	/* Parse rwind and mtu sizes if provided. */
	if (info.rxMTU)
		rxrpc_input_ackinfo(call, skb, &info);

	if (first_soft_ack == 0) {
		rxrpc_proto_abort("AK0", call, 0);
		goto out;
	}

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		goto out;
	}

	if (before(hard_ack, call->acks_hard_ack) ||
	    after(hard_ack, call->tx_top)) {
		rxrpc_proto_abort("AKW", call, 0);
		goto out;
	}
	if (nr_acks > call->tx_top - hard_ack) {
		rxrpc_proto_abort("AKN", call, 0);
		goto out;
	}

	if (after(hard_ack, call->acks_hard_ack)) {
		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
			rxrpc_end_tx_phase(call, false, "ETA");
			goto out;
		}
	}

	if (nr_acks > 0) {
		if (offset > (int)skb->len - nr_acks) {
			rxrpc_proto_abort("XSA", call, 0);
			goto out;
		}

		spin_lock(&call->acks_ack_lock);
		skb_old = call->acks_soft_tbl;
		call->acks_soft_tbl = skb;
		spin_unlock(&call->acks_ack_lock);

		rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
				      nr_acks, &summary);
		skb_put = NULL;
	} else if (call->acks_soft_tbl) {
		spin_lock(&call->acks_ack_lock);
		skb_old = call->acks_soft_tbl;
		call->acks_soft_tbl = NULL;
		spin_unlock(&call->acks_ack_lock);
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
	    summary.nr_acks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
		rxrpc_propose_ping(call, ack_serial,
				   rxrpc_propose_ack_ping_for_lost_reply);

	rxrpc_congestion_management(call, skb, &summary, acked_serial);
out:
	spin_unlock(&call->input_lock);
out_not_locked:
	rxrpc_free_skb(skb_put, rxrpc_skb_freed);
	rxrpc_free_skb(skb_old, rxrpc_skb_freed);
}
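
/*
 * For reference, the parsing above assumes the following ACK packet layout,
 * mirroring the ioffset calculation: the wire header, then the fixed
 * rxrpc_ackpacket body, then nAcks single-byte soft-ACK/NAK entries, then
 * three pad bytes, and finally the optional rxrpc_ackinfo trailer carrying
 * the peer's MTU figures and receive window.
 */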

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_proto("Rx ACKALL %%%u", sp->hdr.serial);

	spin_lock(&call->input_lock);

	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
		rxrpc_end_tx_phase(call, false, "ETL");

	spin_unlock(&call->input_lock);
}

/*
 * Process an ABORT packet directed at a call.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code = RX_CALL_DEAD;

	_enter("");

	if (skb->len >= 4 &&
	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  &wtmp, sizeof(wtmp)) >= 0)
		abort_code = ntohl(wtmp);

	trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);

	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

	rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				  abort_code, -ECONNABORTED);
}

/*
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long timo;

	_enter("%p,%p", call, skb);

	timo = READ_ONCE(call->next_rx_timo);
	if (timo) {
		unsigned long now = jiffies, expect_rx_by;

		expect_rx_by = now + timo;
		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
		rxrpc_reduce_call_timer(call, expect_rx_by, now,
					rxrpc_timer_set_for_normal);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb);
		goto no_free;

	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_input_ack(call, skb);
		goto no_free;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business. BUSY packets
		 * from the client don't make sense.
		 */
		break;

	case RXRPC_PACKET_TYPE_ABORT:
		rxrpc_input_abort(call, skb);
		break;

	case RXRPC_PACKET_TYPE_ACKALL:
		rxrpc_input_ackall(call, skb);
		break;

	default:
		break;
	}

	rxrpc_free_skb(skb, rxrpc_skb_freed);
no_free:
	_leave("");
}
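
/*
 * Note for the function below: a connection multiplexes up to four channels
 * and each channel carries one call at a time, identified by an
 * ever-increasing callNumber.  Seeing a packet for callNumber N + 1 on a
 * channel whose current call is N therefore tells us that the peer considers
 * call N finished.
 */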

/*
 * Handle a new service call on a channel implicitly completing the preceding
 * call on that channel. This does not apply to client conns.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
					  struct rxrpc_connection *conn,
					  struct rxrpc_call *call)
{
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		fallthrough;
	case RXRPC_CALL_COMPLETE:
		break;
	default:
		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		trace_rxrpc_improper_term(call);
		break;
	}

	spin_lock(&rx->incoming_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&rx->incoming_lock);
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}

/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	if (rxrpc_get_local_maybe(local)) {
		skb_queue_tail(&local->event_queue, skb);
		rxrpc_queue_local(local);
	} else {
		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}
}

/*
 * put a packet up for transport-level abort
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	if (rxrpc_get_local_maybe(local)) {
		skb_queue_tail(&local->reject_queue, skb);
		rxrpc_queue_local(local);
	} else {
		rxrpc_free_skb(skb, rxrpc_skb_freed);
	}
}

/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_hdr"));
		return -EBADMSG;
	}

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch		= ntohl(whdr.epoch);
	sp->hdr.cid		= ntohl(whdr.cid);
	sp->hdr.callNumber	= ntohl(whdr.callNumber);
	sp->hdr.seq		= ntohl(whdr.seq);
	sp->hdr.serial		= ntohl(whdr.serial);
	sp->hdr.flags		= whdr.flags;
	sp->hdr.type		= whdr.type;
	sp->hdr.userStatus	= whdr.userStatus;
	sp->hdr.securityIndex	= whdr.securityIndex;
	sp->hdr._rsvd		= ntohs(whdr._rsvd);
	sp->hdr.serviceId	= ntohs(whdr.serviceId);
	return 0;
}
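
/*
 * For reference, rxrpc_wire_header (see protocol.h) is 28 bytes on the wire:
 * four-byte big-endian epoch, cid, callNumber, seq and serial fields,
 * followed by single-byte type, flags, userStatus and securityIndex fields
 * and two-byte _rsvd/cksum and serviceId fields.  rxrpc_extract_header()
 * above folds these into host byte order in the skb's private area.
 */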

/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * [!] Note that as this is called from the encap_rcv hook, the socket is not
 * held locked by the caller and nothing prevents sk_user_data on the UDP
 * socket from being cleared in the middle of processing this function.
 *
 * Called with the RCU read lock held from the IP layer via UDP.
 */
int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan;
	struct rxrpc_call *call = NULL;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_sock *rx = NULL;
	unsigned int channel;

	_enter("%p", udp_sk);

	if (unlikely(!local)) {
		kfree_skb(skb);
		return 0;
	}
	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();

	rxrpc_new_skb(skb, rxrpc_skb_received);

	skb_pull(skb, sizeof(struct udphdr));

	/* The UDP protocol already released all skb resources;
	 * we are free to add our own data there.
	 */
	sp = rxrpc_skb(skb);

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
			rxrpc_free_skb(skb, rxrpc_skb_lost);
			return 0;
		}
	}

	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();
	trace_rxrpc_rx_packet(sp);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_VERSION:
		if (rxrpc_to_client(sp))
			goto discard;
		rxrpc_post_packet_to_local(local, skb);
		goto out;

	case RXRPC_PACKET_TYPE_BUSY:
		if (rxrpc_to_server(sp))
			goto discard;
		fallthrough;
	case RXRPC_PACKET_TYPE_ACK:
	case RXRPC_PACKET_TYPE_ACKALL:
		if (sp->hdr.callNumber == 0)
			goto bad_message;
		fallthrough;
	case RXRPC_PACKET_TYPE_ABORT:
		break;

	case RXRPC_PACKET_TYPE_DATA:
		if (sp->hdr.callNumber == 0 ||
		    sp->hdr.seq == 0)
			goto bad_message;

		/* Unshare the packet so that it can be modified for in-place
		 * decryption.
		 */
		if (sp->hdr.securityIndex != 0) {
			struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC);
			if (!nskb) {
				rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem);
				goto out;
			}

			if (nskb != skb) {
				rxrpc_eaten_skb(skb, rxrpc_skb_received);
				skb = nskb;
				rxrpc_new_skb(skb, rxrpc_skb_unshared);
				sp = rxrpc_skb(skb);
			}
		}
		break;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		if (rxrpc_to_server(sp))
			goto discard;
		break;
	case RXRPC_PACKET_TYPE_RESPONSE:
		if (rxrpc_to_client(sp))
			goto discard;
		break;

	/* Packet types 9-11 should just be ignored. */
	case RXRPC_PACKET_TYPE_PARAMS:
	case RXRPC_PACKET_TYPE_10:
	case RXRPC_PACKET_TYPE_11:
		goto discard;

	default:
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	if (sp->hdr.serviceId == 0)
		goto bad_message;

	if (rxrpc_to_server(sp)) {
		/* Weed out packets to services we're not offering. Packets
		 * that would begin a call are explicitly rejected and the rest
		 * are just discarded.
		 */
		rx = rcu_dereference(local->service);
		if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
			    sp->hdr.serviceId != rx->second_service)) {
			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
			    sp->hdr.seq == 1)
				goto unsupported_service;
			goto discard;
		}
	}

	conn = rxrpc_find_connection_rcu(local, skb, &peer);
	if (conn) {
		if (sp->hdr.securityIndex != conn->security_ix)
			goto wrong_security;

		if (sp->hdr.serviceId != conn->service_id) {
			int old_id;

			if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
				goto reupgrade;
			old_id = cmpxchg(&conn->service_id, conn->params.service_id,
					 sp->hdr.serviceId);

			if (old_id != conn->params.service_id &&
			    old_id != sp->hdr.serviceId)
				goto reupgrade;
		}

		if (sp->hdr.callNumber == 0) {
			/* Connection-level packet */
			_debug("CONN %p {%d}", conn, conn->debug_id);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out;
		}

		if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
			conn->hi_serial = sp->hdr.serial;

		/* Call-bound packets are routed by connection channel. */
		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];

		/* Ignore really old calls */
		if (sp->hdr.callNumber < chan->last_call)
			goto discard;

		if (sp->hdr.callNumber == chan->last_call) {
			if (chan->call ||
			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
				goto discard;

			/* For the previous service call, if completed
			 * successfully, we discard all further packets.
			 */
			if (rxrpc_conn_is_service(conn) &&
			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
				goto discard;

			/* But otherwise we need to retransmit the final packet
			 * from data cached in the connection record.
			 */
			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
				trace_rxrpc_rx_data(chan->call_debug_id,
						    sp->hdr.seq,
						    sp->hdr.serial,
						    sp->hdr.flags);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out;
		}

		call = rcu_dereference(chan->call);

		if (sp->hdr.callNumber > chan->call_id) {
			if (rxrpc_to_client(sp))
				goto reject_packet;
			if (call)
				rxrpc_input_implicit_end_call(rx, conn, call);
			call = NULL;
		}

		if (call) {
			if (sp->hdr.serviceId != call->service_id)
				call->service_id = sp->hdr.serviceId;
			if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
				call->rx_serial = sp->hdr.serial;
			if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
				set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
		}
	}

	if (!call || refcount_read(&call->ref) == 0) {
		if (rxrpc_to_client(sp) ||
		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
			goto bad_message;
		if (sp->hdr.seq != 1)
			goto discard;
		call = rxrpc_new_incoming_call(local, rx, skb);
		if (!call)
			goto reject_packet;
	}

	/* Process a call packet; this either discards or passes on the ref
	 * elsewhere.
	 */
	rxrpc_input_call_packet(call, skb);
	goto out;

discard:
	rxrpc_free_skb(skb, rxrpc_skb_freed);
out:
	trace_rxrpc_rx_done(0, 0);
	return 0;

wrong_security:
	trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RXKADINCONSISTENCY, EBADMSG);
	skb->priority = RXKADINCONSISTENCY;
	goto post_abort;

unsupported_service:
	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->priority = RX_INVALID_OPERATION;
	goto post_abort;

reupgrade:
	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
	goto protocol_error;

bad_message:
	trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
	skb->priority = RX_PROTOCOL_ERROR;
post_abort:
	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
reject_packet:
	trace_rxrpc_rx_done(skb->mark, skb->priority);
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
	return 0;
}