// SPDX-License-Identifier: GPL-2.0-or-later
/* connection-level event handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Set the completion state on an aborted connection.
 *
 * Record the abort code, error and completion reason on @conn and move it to
 * the RXRPC_CONN_ABORTED state, using an unlocked check followed by a
 * recheck under conn->state_lock so that only the first aborter wins.  The
 * connection is flagged as not reusable and an event is raised asking for
 * the attached calls to be aborted.
 *
 * Returns true if this caller performed the transition, false if the
 * connection was already aborted.
 */
static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn,
				   s32 abort_code, int err,
				   enum rxrpc_call_completion compl)
{
	bool aborted = false;

	/* Cheap unlocked check first; recheck under the lock to serialise
	 * racing aborters.
	 */
	if (conn->state != RXRPC_CONN_ABORTED) {
		spin_lock_irq(&conn->state_lock);
		if (conn->state != RXRPC_CONN_ABORTED) {
			conn->abort_code = abort_code;
			conn->error = err;
			conn->completion = compl;
			/* Order the abort info before the state change. */
			smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
			set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
			aborted = true;
		}
		spin_unlock_irq(&conn->state_lock);
	}

	return aborted;
}

/*
 * Mark a socket buffer to indicate that the connection it's on should be aborted.
 *
 * Locally abort @conn, attributing the abort to the packet in @skb (if any)
 * for tracing purposes, and poke the connection so that the abort gets
 * transmitted.  Always returns -EPROTO so callers can use this directly as
 * their return value.
 */
int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
		     s32 abort_code, int err, enum rxrpc_abort_reason why)
{
	u32 cid = conn->proto.cid, call = 0, seq = 0;

	/* Prefer the identifiers from the offending packet when available. */
	if (skb) {
		struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

		cid = sp->hdr.cid;
		call = sp->hdr.callNumber;
		seq = sp->hdr.seq;
	}

	/* Only trace and poke if we actually won the race to abort. */
	if (rxrpc_set_conn_aborted(conn, abort_code, err,
				   RXRPC_CALL_LOCALLY_ABORTED)) {
		trace_rxrpc_abort(0, why, cid, call, seq, abort_code, err);
		rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort);
	}
	return -EPROTO;
}

/*
 * Mark a connection as being remotely aborted.
 */
static void rxrpc_input_conn_abort(struct rxrpc_connection *conn,
				   struct sk_buff *skb)
{
	trace_rxrpc_rx_conn_abort(conn, skb);
	/* skb->priority carries the peer's abort code here — presumably set
	 * by the input path when the ABORT packet was decoded; TODO confirm.
	 */
	rxrpc_set_conn_aborted(conn, skb->priority, -ECONNABORTED,
			       RXRPC_CALL_REMOTELY_ABORTED);
}

/*
 * Retransmit terminal ACK or ABORT of the previous call.
 *
 * Re-send the final ACK or ABORT of the call that last occupied @channel,
 * rebuilt from the state cached on the channel.  @skb, if given, is the
 * packet that provoked the retransmission; if NULL (delayed final-ACK
 * processing), an IDLE ACK is synthesised instead.
 */
void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
				struct sk_buff *skb,
				unsigned int channel)
{
	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
	struct rxrpc_channel *chan;
	struct msghdr msg;
	struct kvec iov[3];
	/* On-stack packet image: wire header followed by either an abort
	 * code or an ACK body, depending on the cached terminal packet type.
	 */
	struct {
		struct rxrpc_wire_header whdr;
		union {
			__be32 abort_code;
			struct rxrpc_ackpacket ack;
		};
	} __attribute__((packed)) pkt;
	struct rxrpc_acktrailer trailer;
	size_t len;
	int ret, ioc;
	u32 serial, max_mtu, if_mtu, call_id, padding;

	_enter("%d", conn->debug_id);

	/* Don't respond to a ping response with a retransmission. */
	if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &pkt.ack, sizeof(pkt.ack)) < 0)
			return;
		if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
			return;
	}

	chan = &conn->channels[channel];

	/* If the last call got moved on whilst we were waiting to run, just
	 * ignore this packet.
	 */
	call_id = chan->last_call;
	if (skb && call_id != sp->hdr.callNumber)
		return;

	msg.msg_name = &conn->peer->srx.transport;
	msg.msg_namelen = conn->peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* iov[0] carries the header (plus abort code or ACK body); iov[1]
	 * (3 bytes of padding) and iov[2] (the ACK trailer) are only used
	 * when sending an ACK (ioc == 3).
	 */
	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt.whdr);
	iov[1].iov_base = &padding;
	iov[1].iov_len = 3;
	iov[2].iov_base = &trailer;
	iov[2].iov_len = sizeof(trailer);

	serial = rxrpc_get_next_serial(conn);

	/* Rebuild the wire header from the connection and channel state. */
	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(conn->proto.cid | channel);
	pkt.whdr.callNumber = htonl(call_id);
	pkt.whdr.serial = htonl(serial);
	pkt.whdr.seq = 0;
	pkt.whdr.type = chan->last_type;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = conn->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(conn->service_id);

	len = sizeof(pkt.whdr);
	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		pkt.abort_code = htonl(chan->last_abort);
		iov[0].iov_len += sizeof(pkt.abort_code);
		len += sizeof(pkt.abort_code);
		ioc = 1;
		break;

	case RXRPC_PACKET_TYPE_ACK:
		/* Work out the MTUs to advertise: clamped to a fixed
		 * conservative value unless the peer advertises PMTU
		 * discovery support.
		 */
		if_mtu = conn->peer->if_mtu - conn->peer->hdrsize;
		if (conn->peer->ackr_adv_pmtud) {
			max_mtu = umax(conn->peer->max_data, rxrpc_rx_mtu);
		} else {
			if_mtu = umin(1444, if_mtu);
			max_mtu = if_mtu;
		}
		pkt.ack.bufferSpace = 0;
		pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
		pkt.ack.firstPacket = htonl(chan->last_seq + 1);
		pkt.ack.previousPacket = htonl(chan->last_seq);
		pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
		/* DUPLICATE if provoked by a packet, IDLE if driven by the
		 * delayed final-ACK processing.
		 */
		pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
		pkt.ack.nAcks = 0;
		trailer.maxMTU = htonl(max_mtu);
		trailer.ifMTU = htonl(if_mtu);
		trailer.rwind = htonl(rxrpc_rx_window_size);
		trailer.jumbo_max = 0;
		pkt.whdr.flags |= RXRPC_SLOW_START_OK;
		padding = 0;
		iov[0].iov_len += sizeof(pkt.ack);
		len += sizeof(pkt.ack) + 3 + sizeof(trailer);
		ioc = 3;

		trace_rxrpc_tx_ack(chan->call_debug_id, serial,
				   ntohl(pkt.ack.firstPacket),
				   ntohl(pkt.ack.serial),
				   pkt.ack.reason, 0, rxrpc_rx_window_size,
				   rxrpc_propose_ack_retransmit);
		break;

	default:
		/* Nothing cached that we know how to retransmit. */
		return;
	}

	ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
	rxrpc_peer_mark_tx(conn->peer);
	if (ret < 0)
		trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
				    rxrpc_tx_point_call_final_resend);
	else
		trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_final_resend);

	_leave("");
}

/*
 * pass a connection-level abort onto all calls on that connection
 *
 * Each call still attached to a channel is completed with the completion
 * reason, abort code and error recorded on the connection, then poked so
 * its state machine notices.
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn)
{
	struct rxrpc_call *call;
	int i;

	_enter("{%d},%x", conn->debug_id, conn->abort_code);

	for (i = 0; i < RXRPC_MAXCALLS; i++) {
		call = conn->channels[i].call;
		if (call) {
			rxrpc_see_call(call, rxrpc_call_see_conn_abort);
			rxrpc_set_call_completion(call,
						  conn->completion,
						  conn->abort_code,
						  conn->error);
			rxrpc_poke_call(call, rxrpc_call_poke_conn_abort);
		}
	}

	_leave("");
}

/*
 * mark a call as being on a now-secured channel
 * - must be called with BH's disabled.
 */
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
	/* Only notify the socket if the call was actually waiting on the
	 * security negotiation to complete.
	 */
	if (call && __test_and_clear_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags))
		rxrpc_notify_socket(call);
}

/*
 * connection-level Rx packet processor
 *
 * Handle CHALLENGE and RESPONSE security packets queued on the connection.
 * Returns 0 on success or a negative error; -ECONNABORTED if the connection
 * has been aborted.  -ENOMEM/-EAGAIN results cause the caller to requeue
 * the packet and retry.
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	bool secured = false;
	int ret;

	if (conn->state == RXRPC_CONN_ABORTED)
		return -ECONNABORTED;

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_CHALLENGE:
		ret = conn->security->respond_to_challenge(conn, skb);
		/* Drop the connection ref that was attached to this
		 * challenge packet.
		 */
		sp->chall.conn = NULL;
		rxrpc_put_connection(conn, rxrpc_conn_put_challenge_input);
		return ret;

	case RXRPC_PACKET_TYPE_RESPONSE:
		/* Only process a RESPONSE whilst we're still expecting one. */
		spin_lock_irq(&conn->state_lock);
		if (conn->state != RXRPC_CONN_SERVICE_CHALLENGING) {
			spin_unlock_irq(&conn->state_lock);
			return 0;
		}
		spin_unlock_irq(&conn->state_lock);

		ret = conn->security->verify_response(conn, skb);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(
			conn, conn->key->payload.data[0]);
		if (ret < 0)
			return ret;

		/* Recheck under the lock: the state may have changed whilst
		 * we were verifying the response.
		 */
		spin_lock_irq(&conn->state_lock);
		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVICE;
			secured = true;
		}
		spin_unlock_irq(&conn->state_lock);

		if (secured) {
			/* Offload call state flipping to the I/O thread.  As
			 * we've already received the packet, put it on the
			 * front of the queue.
			 */
			sp->poke_conn = rxrpc_get_connection(
				conn, rxrpc_conn_get_poke_secured);
			skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
			rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
			skb_queue_head(&conn->local->rx_queue, skb);
			rxrpc_wake_up_io_thread(conn->local);
		}
		return 0;

	default:
		/* Other packet types shouldn't end up on this queue. */
		WARN_ON_ONCE(1);
		return -EPROTO;
	}
}

/*
 * set up security and issue a challenge
 *
 * A failure to issue the challenge (treated as lack of memory) aborts the
 * connection with RX_CALL_DEAD.
 */
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
	if (conn->security->issue_challenge(conn) < 0)
		rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM,
				 rxrpc_abort_nomem);
}

/*
 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
 *
 * Scan all channels for pending final ACKs, transmitting those whose
 * deadline has passed (or all of them if @force), and rearm the connection
 * timer for the earliest deadline still outstanding.
 */
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
{
	unsigned long j = jiffies, next_j;
	unsigned int channel;
	bool set;

again:
	/* Start with a deadline in the far future; any real pending deadline
	 * will be earlier.
	 */
	next_j = j + LONG_MAX;
	set = false;
	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
		struct rxrpc_channel *chan = &conn->channels[channel];
		unsigned long ack_at;

		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
			continue;

		ack_at = chan->final_ack_at;
		if (time_before(j, ack_at) && !force) {
			/* Not yet due - remember the earliest deadline seen. */
			if (time_before(ack_at, next_j)) {
				next_j = ack_at;
				set = true;
			}
			continue;
		}

		/* test_and_clear so each final ACK is only sent once even if
		 * we race with another clearer.
		 */
		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
				       &conn->flags))
			rxrpc_conn_retransmit_call(conn, NULL, channel);
	}

	/* Time may have moved on whilst transmitting; rescan if the earliest
	 * remaining deadline is already behind us.
	 */
	j = jiffies;
	if (time_before_eq(next_j, j))
		goto again;
	if (set)
		rxrpc_reduce_conn_timer(conn, next_j);
}

/*
 * connection-level event processor
 */
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
	struct sk_buff *skb;
	int ret;

	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
		rxrpc_secure_connection(conn);

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
		ret = rxrpc_process_event(conn, skb);
		switch (ret) {
		case -ENOMEM:
		case -EAGAIN:
			/* Transient failure: put the packet back at the head
			 * and requeue the work item to retry later.
			 */
			skb_queue_head(&conn->rx_queue, skb);
			rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
			break;
		default:
			rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
			break;
		}
	}
}

/*
 * Work item entry point: process connection events whilst holding a usage
 * on the local endpoint.
 */
void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);

	rxrpc_see_connection(conn, rxrpc_conn_see_work);

	/* Skip processing entirely if the local endpoint can't be pinned. */
	if (__rxrpc_use_local(conn->local, rxrpc_local_use_conn_work)) {
		rxrpc_do_process_connection(conn);
		rxrpc_unuse_local(conn->local, rxrpc_local_unuse_conn_work);
	}
}

/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission.
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	/* The rx_queue holds a ref on the skb; the queued work item will
	 * consume it.
	 */
	rxrpc_get_skb(skb, rxrpc_skb_get_conn_work);
	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn, rxrpc_conn_queue_rx_work);
}

/*
 * Post a CHALLENGE packet to the socket of one of a connection's calls so that
 * it can get application data to include in the packet, possibly querying
 * userspace.
 */
static bool rxrpc_post_challenge(struct rxrpc_connection *conn,
				 struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = NULL;
	struct rxrpc_sock *rx;
	bool respond = false;

	/* The challenge packet carries a ref on the connection until it has
	 * been dealt with.
	 */
	sp->chall.conn =
		rxrpc_get_connection(conn, rxrpc_conn_get_challenge_input);

	/* If this security class doesn't hand challenges to recvmsg(), let
	 * the connection event processor answer it directly.
	 */
	if (!conn->security->challenge_to_recvmsg) {
		rxrpc_post_packet_to_conn(conn, skb);
		return true;
	}

	rcu_read_lock();

	/* Look for a call with a live socket; prefer one whose socket has
	 * opted to manage responses itself (stop there), otherwise just note
	 * that some call exists to respond with.
	 */
	for (int i = 0; i < ARRAY_SIZE(conn->channels); i++) {
		if (conn->channels[i].call) {
			call = conn->channels[i].call;
			rx = rcu_dereference(call->socket);
			if (!rx) {
				call = NULL;
				continue;
			}

			respond = true;
			if (test_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags))
				break;
			call = NULL;
		}
	}

	if (!respond) {
		/* No call with a live socket: drop the challenge and the ref
		 * it held.
		 */
		rcu_read_unlock();
		rxrpc_put_connection(conn, rxrpc_conn_put_challenge_input);
		sp->chall.conn = NULL;
		return false;
	}

	/* A managing socket was found: hand the challenge to userspace
	 * out-of-band.
	 */
	if (call)
		rxrpc_notify_socket_oob(call, skb);
	rcu_read_unlock();

	/* Calls exist but none manages responses: fall back to the in-kernel
	 * responder.
	 */
	if (!call)
		rxrpc_post_packet_to_conn(conn, skb);
	return true;
}

/*
 * Input a connection-level packet.
 *
 * Dispatch on the packet type.  Returns false only when a CHALLENGE fails
 * validation or cannot be posted anywhere; true otherwise.
 */
bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets for now. */
		return true;

	case RXRPC_PACKET_TYPE_ABORT:
		if (rxrpc_is_conn_aborted(conn))
			return true;
		rxrpc_input_conn_abort(conn, skb);
		rxrpc_abort_calls(conn);
		return true;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		rxrpc_see_skb(skb, rxrpc_skb_see_oob_challenge);
		if (rxrpc_is_conn_aborted(conn)) {
			/* Remind the peer that we aborted this connection
			 * rather than answering the challenge.
			 */
			if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
				rxrpc_send_conn_abort(conn);
			return true;
		}
		if (!conn->security->validate_challenge(conn, skb))
			return false;
		return rxrpc_post_challenge(conn, skb);

	case RXRPC_PACKET_TYPE_RESPONSE:
		if (rxrpc_is_conn_aborted(conn)) {
			if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
				rxrpc_send_conn_abort(conn);
			return true;
		}
		rxrpc_post_packet_to_conn(conn, skb);
		return true;

	default:
		WARN_ON_ONCE(1);
		return true;
	}
}

/*
 * Input a connection event.
 *
 * @skb may be NULL.  Handles a pending abort-calls event, transmits any
 * queued security RESPONSE, flips the call state on a newly secured service
 * connection and processes final ACKs that have become due.
 */
void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
{
	unsigned int loop;

	if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
		rxrpc_abort_calls(conn);

	if (conn->tx_response) {
		/* NOTE: this local deliberately shadows the @skb parameter
		 * within this scope.
		 */
		struct sk_buff *skb;

		/* Detach the pending RESPONSE under the local lock. */
		spin_lock_irq(&conn->local->lock);
		skb = conn->tx_response;
		conn->tx_response = NULL;
		spin_unlock_irq(&conn->local->lock);

		/* Don't bother transmitting if the connection has since been
		 * aborted; free the skb either way.
		 */
		if (conn->state != RXRPC_CONN_ABORTED)
			rxrpc_send_response(conn, skb);
		rxrpc_free_skb(skb, rxrpc_skb_put_response);
	}

	if (skb) {
		switch (skb->mark) {
		case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
			if (conn->state != RXRPC_CONN_SERVICE)
				break;

			/* Wake every call that was waiting for the security
			 * negotiation to complete.
			 */
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(conn->channels[loop].call);
			break;
		}
	}

	/* Process delayed ACKs whose time has come. */
	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, false);
}

/*
 * Post a RESPONSE message to the I/O thread for transmission.
 */
void rxrpc_post_response(struct rxrpc_connection *conn, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_local *local = conn->local;
	struct sk_buff *old;

	_enter("%x", sp->resp.challenge_serial);

	/* Stash the RESPONSE on the connection for the I/O thread to
	 * transmit, displacing any older pending response under the local
	 * lock.
	 */
	spin_lock_irq(&local->lock);
	old = conn->tx_response;
	if (old) {
		struct rxrpc_skb_priv *osp = rxrpc_skb(old);

		/* Always go with the response to the most recent challenge. */
		if (after(sp->resp.challenge_serial, osp->resp.challenge_serial))
			conn->tx_response = skb;
		else
			old = skb;
	} else {
		conn->tx_response = skb;
	}
	spin_unlock_irq(&local->lock);
	rxrpc_poke_conn(conn, rxrpc_conn_get_poke_response);
	/* Free whichever skb lost - possibly the new one if it answers an
	 * older challenge.
	 */
	rxrpc_free_skb(old, rxrpc_skb_put_old_response);
}