/* SPDX-License-Identifier: GPL-2.0-or-later */
/* AF_RXRPC internal definitions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/atomic.h>
#include <linux/seqlock.h>
#include <linux/win_minmax.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
#include "protocol.h"

#define FCRYPT_BSIZE 8
struct rxrpc_crypt {
	union {
		u8	x[FCRYPT_BSIZE];
		__be32	n[2];
	};
} __attribute__((aligned(8)));

#define rxrpc_queue_work(WS)	queue_work(rxrpc_workqueue, (WS))
#define rxrpc_queue_delayed_work(WS,D)	\
	queue_delayed_work(rxrpc_workqueue, (WS), (D))

struct key_preparsed_payload;
struct rxrpc_connection;
struct rxrpc_txbuf;

/*
 * Mark applied to socket buffers in skb->mark.  skb->priority is used
 * to pass supplementary information.
 */
enum rxrpc_skb_mark {
	RXRPC_SKB_MARK_PACKET,		/* Received packet */
	RXRPC_SKB_MARK_ERROR,		/* Error notification */
	RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */
	RXRPC_SKB_MARK_REJECT_BUSY,	/* Reject with BUSY */
	RXRPC_SKB_MARK_REJECT_ABORT,	/* Reject with ABORT (code in skb->priority) */
};

/*
 * sk_state for RxRPC sockets
 */
enum {
	RXRPC_UNBOUND = 0,
	RXRPC_CLIENT_UNBOUND,		/* Unbound socket used as client */
	RXRPC_CLIENT_BOUND,		/* client local address bound */
	RXRPC_SERVER_BOUND,		/* server local address bound */
	RXRPC_SERVER_BOUND2,		/* second server local address bound */
	RXRPC_SERVER_LISTENING,		/* server listening for connections */
	RXRPC_SERVER_LISTEN_DISABLED,	/* server listening disabled */
	RXRPC_CLOSE,			/* socket is being closed */
};
/*
 * Per-network namespace data.
 */
struct rxrpc_net {
	struct proc_dir_entry	*proc_net;	/* Subdir in /proc/net */
	u32			epoch;		/* Local epoch for detecting local-end reset */
	struct list_head	calls;		/* List of calls active in this namespace */
	spinlock_t		call_lock;	/* Lock for ->calls */
	atomic_t		nr_calls;	/* Count of allocated calls */

	atomic_t		nr_conns;
	struct list_head	bundle_proc_list; /* List of bundles for proc */
	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
	struct list_head	service_conns;	/* Service conns in this namespace */
	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
	struct work_struct	service_conn_reaper;
	struct timer_list	service_conn_reap_timer;

	bool			live;

	atomic_t		nr_client_conns;

	struct hlist_head	local_endpoints;
	struct mutex		local_mutex;	/* Lock for ->local_endpoints */

	DECLARE_HASHTABLE	(peer_hash, 10);
	spinlock_t		peer_hash_lock;	/* Lock for ->peer_hash */

#define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
	u8			peer_keepalive_cursor;
	time64_t		peer_keepalive_base;
	struct list_head	peer_keepalive[32];
	struct list_head	peer_keepalive_new;
	struct timer_list	peer_keepalive_timer;
	struct work_struct	peer_keepalive_work;

	atomic_t		stat_tx_data;
	atomic_t		stat_tx_data_retrans;
	atomic_t		stat_tx_data_send;
	atomic_t		stat_tx_data_send_frag;
	atomic_t		stat_tx_data_send_fail;
	atomic_t		stat_tx_data_underflow;
	atomic_t		stat_tx_data_cwnd_reset;
	atomic_t		stat_rx_data;
	atomic_t		stat_rx_data_reqack;
	atomic_t		stat_rx_data_jumbo;

	atomic_t		stat_tx_ack_fill;
	atomic_t		stat_tx_ack_send;
	atomic_t		stat_tx_ack_skip;
	atomic_t		stat_tx_acks[256];
	atomic_t		stat_rx_acks[256];

	atomic_t		stat_why_req_ack[8];

	atomic_t		stat_io_loop;
};

/*
 * Service backlog preallocation.
 *
 * This contains circular buffers of preallocated peers, connections and calls
 * for incoming service calls and their head and tail pointers.  This allows
 * calls to be set up in the data_ready handler, thereby avoiding the need to
 * shuffle packets around so much.
 */
struct rxrpc_backlog {
	unsigned short		peer_backlog_head;
	unsigned short		peer_backlog_tail;
	unsigned short		conn_backlog_head;
	unsigned short		conn_backlog_tail;
	unsigned short		call_backlog_head;
	unsigned short		call_backlog_tail;
#define RXRPC_BACKLOG_MAX	32
	struct rxrpc_peer	*peer_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_connection	*conn_backlog[RXRPC_BACKLOG_MAX];
	struct rxrpc_call	*call_backlog[RXRPC_BACKLOG_MAX];
};
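
/*
 * Illustrative sketch only (not the accept-path code): with the head and
 * tail indices above, a ring of RXRPC_BACKLOG_MAX entries still has room
 * while head - tail is less than RXRPC_BACKLOG_MAX; the producer fills
 * slot (head & (RXRPC_BACKLOG_MAX - 1)) and then advances head, and the
 * consumer drains from the equivalent tail slot.  The helper name below
 * is hypothetical and exists purely to show the index arithmetic.
 */
static inline bool rxrpc_backlog_ring_has_space(unsigned short head,
						unsigned short tail)
{
	return (unsigned short)(head - tail) < RXRPC_BACKLOG_MAX;
}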

/*
 * RxRPC socket definition
 */
struct rxrpc_sock {
	/* WARNING: sk has to be the first member */
	struct sock		sk;
	rxrpc_notify_new_call_t	notify_new_call; /* Func to notify of new call */
	rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */
	struct rxrpc_local	*local;		/* local endpoint */
	struct rxrpc_backlog	*backlog;	/* Preallocation for services */
	spinlock_t		incoming_lock;	/* Incoming call vs service shutdown lock */
	struct list_head	sock_calls;	/* List of calls owned by this socket */
	struct list_head	to_be_accepted;	/* calls awaiting acceptance */
	struct list_head	recvmsg_q;	/* Calls awaiting recvmsg's attention */
	spinlock_t		recvmsg_lock;	/* Lock for recvmsg_q */
	struct key		*key;		/* security for this socket */
	struct key		*securities;	/* list of server security descriptors */
	struct rb_root		calls;		/* User ID -> call mapping */
	unsigned long		flags;
#define RXRPC_SOCK_CONNECTED	0		/* connect_srx is set */
	rwlock_t		call_lock;	/* lock for calls */
	u32			min_sec_level;	/* minimum security level */
#define RXRPC_SECURITY_MAX	RXRPC_SECURITY_ENCRYPT
	bool			exclusive;	/* Exclusive connection for a client socket */
	u16			second_service;	/* Additional service bound to the endpoint */
	struct {
		/* Service upgrade information */
		u16		from;		/* Service ID to upgrade (if not 0) */
		u16		to;		/* service ID to upgrade to */
	} service_upgrade;
	sa_family_t		family;		/* Protocol family created with */
	struct sockaddr_rxrpc	srx;		/* Primary Service/local addresses */
	struct sockaddr_rxrpc	connect_srx;	/* Default client address from connect() */
};

#define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)

/*
 * CPU-byteorder normalised Rx packet header.
 */
struct rxrpc_host_header {
	u32		epoch;		/* client boot timestamp */
	u32		cid;		/* connection and channel ID */
	u32		callNumber;	/* call ID (0 for connection-level packets) */
	u32		seq;		/* sequence number of pkt in call stream */
	u32		serial;		/* serial number of pkt sent to network */
	u8		type;		/* packet type */
	u8		flags;		/* packet flags */
	u8		userStatus;	/* app-layer defined status */
	u8		securityIndex;	/* security protocol ID */
	union {
		u16	_rsvd;		/* reserved */
		u16	cksum;		/* kerberos security checksum */
	};
	u16		serviceId;	/* service ID */
} __packed;

/*
 * RxRPC socket buffer private variables
 * - max 48 bytes (struct sk_buff::cb)
 */
struct rxrpc_skb_priv {
	struct rxrpc_connection *conn;	/* Connection referred to (poke packet) */
	union {
		struct {
			u16		offset;		/* Offset of data */
			u16		len;		/* Length of data */
			u8		flags;
#define RXRPC_RX_VERIFIED	0x01
		};
		struct {
			rxrpc_seq_t	first_ack;	/* First packet in acks table */
			u8		nr_acks;	/* Number of acks+nacks */
			u8		nr_nacks;	/* Number of nacks */
		};
	};
	struct rxrpc_host_header hdr;	/* RxRPC packet header from this packet */
};

#define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb)
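
/*
 * Editorial note, purely illustrative (not from the original header): the
 * "max 48 bytes" constraint above means rxrpc_skb_priv has to fit inside
 * sk_buff::cb.  A compile-time check along these lines would express it:
 *
 *	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) >
 *		     sizeof_field(struct sk_buff, cb));
 */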

/*
 * RxRPC security module interface
 */
struct rxrpc_security {
	const char		*name;		/* name of this service */
	u8			security_index;	/* security type provided */
	u32			no_key_abort;	/* Abort code indicating no key */

	/* Initialise a security service */
	int (*init)(void);

	/* Clean up a security service */
	void (*exit)(void);

	/* Parse the information from a server key */
	int (*preparse_server_key)(struct key_preparsed_payload *);

	/* Clean up the preparse buffer after parsing a server key */
	void (*free_preparse_server_key)(struct key_preparsed_payload *);

	/* Destroy the payload of a server key */
	void (*destroy_server_key)(struct key *);

	/* Describe a server key */
	void (*describe_server_key)(const struct key *, struct seq_file *);

	/* initialise a connection's security */
	int (*init_connection_security)(struct rxrpc_connection *,
					struct rxrpc_key_token *);

	/* Work out how much data we can store in a packet, given an estimate
	 * of the amount of data remaining.
	 */
	int (*how_much_data)(struct rxrpc_call *, size_t,
			     size_t *, size_t *, size_t *);

	/* impose security on a packet */
	int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *);

	/* verify the security on a received packet */
	int (*verify_packet)(struct rxrpc_call *, struct sk_buff *);

	/* Free crypto request on a call */
	void (*free_call_crypto)(struct rxrpc_call *);

	/* issue a challenge */
	int (*issue_challenge)(struct rxrpc_connection *);

	/* respond to a challenge */
	int (*respond_to_challenge)(struct rxrpc_connection *,
				    struct sk_buff *);

	/* verify a response */
	int (*verify_response)(struct rxrpc_connection *,
			       struct sk_buff *);

	/* clear connection security */
	void (*clear)(struct rxrpc_connection *);
};

/*
 * RxRPC local transport endpoint description
 * - owned by a single AF_RXRPC socket
 * - pointed to by transport socket struct sk_user_data
 */
struct rxrpc_local {
	struct rcu_head		rcu;
	atomic_t		active_users;	/* Number of users of the local endpoint */
	refcount_t		ref;		/* Number of references to the structure */
	struct net		*net;		/* The network namespace */
	struct rxrpc_net	*rxnet;		/* Our bits in the network namespace */
	struct hlist_node	link;
	struct socket		*socket;	/* my UDP socket */
	struct task_struct	*io_thread;
	struct completion	io_thread_ready; /* Indication that the I/O thread started */
	struct rxrpc_sock	*service;	/* Service(s) listening on this endpoint */
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
	struct sk_buff_head	rx_delay_queue;	/* Delay injection queue */
#endif
	struct sk_buff_head	rx_queue;	/* Received packets */
	struct list_head	conn_attend_q;	/* Conns requiring immediate attention */
	struct list_head	call_attend_q;	/* Calls requiring immediate attention */

	struct rb_root		client_bundles;	/* Client connection bundles by socket params */
	spinlock_t		client_bundles_lock; /* Lock for client_bundles */
	bool			kill_all_client_conns;
	struct list_head	idle_client_conns;
	struct timer_list	client_conn_reap_timer;
	unsigned long		client_conn_flags;
#define RXRPC_CLIENT_CONN_REAP_TIMER	0	/* The client conn reap timer expired */

	spinlock_t		lock;		/* access lock */
	rwlock_t		services_lock;	/* lock for services list */
	int			debug_id;	/* debug ID for printks */
	bool			dead;
	bool			service_closed;	/* Service socket closed */
	struct idr		conn_ids;	/* List of connection IDs */
	struct list_head	new_client_calls; /* Newly created client calls need connection */
	spinlock_t		client_call_lock; /* Lock for ->new_client_calls */
	struct sockaddr_rxrpc	srx;		/* local address */
};

/*
 * RxRPC remote transport endpoint definition
 * - matched by local endpoint, remote port, address and protocol type
 */
struct rxrpc_peer {
	struct rcu_head		rcu;		/* This must be first */
	refcount_t		ref;
	unsigned long		hash_key;
	struct hlist_node	hash_link;
	struct rxrpc_local	*local;
	struct hlist_head	error_targets;	/* targets for net error distribution */
	struct rb_root		service_conns;	/* Service connections */
	struct list_head	keepalive_link;	/* Link in net->peer_keepalive[] */
	time64_t		last_tx_at;	/* Last time packet sent here */
	seqlock_t		service_conn_lock;
	spinlock_t		lock;		/* access lock */
	unsigned int		if_mtu;		/* interface MTU for this peer */
	unsigned int		mtu;		/* network MTU for this peer */
	unsigned int		maxdata;	/* data size (MTU - hdrsize) */
	unsigned short		hdrsize;	/* header size (IP + UDP + RxRPC) */
	int			debug_id;	/* debug ID for printks */
	struct sockaddr_rxrpc	srx;		/* remote address */

	/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
	spinlock_t		rtt_input_lock;	/* RTT lock for input routine */
	ktime_t			rtt_last_req;	/* Time of last RTT request */
	unsigned int		rtt_count;	/* Number of samples we've got */

	u32			srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32			mdev_us;	/* medium deviation */
	u32			mdev_max_us;	/* maximal mdev for the last rtt period */
	u32			rttvar_us;	/* smoothed mdev_max */
	u32			rto_j;		/* Retransmission timeout in jiffies */
	u8			backoff;	/* Backoff timeout */

	u8			cong_ssthresh;	/* Congestion slow-start threshold */
};

/*
 * Keys for matching a connection.
 */
struct rxrpc_conn_proto {
	union {
		struct {
			u32	epoch;		/* epoch of this connection */
			u32	cid;		/* connection ID */
		};
		u64		index_key;
	};
};

struct rxrpc_conn_parameters {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Representation of remote endpoint */
	struct key		*key;		/* Security details */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			service_id;	/* Service ID for this connection */
	u32			security_level;	/* Security level selected */
};

/*
 * Call completion condition (state == RXRPC_CALL_COMPLETE).
 */
enum rxrpc_call_completion {
	RXRPC_CALL_SUCCEEDED,		/* - Normal termination */
	RXRPC_CALL_REMOTELY_ABORTED,	/* - call aborted by peer */
	RXRPC_CALL_LOCALLY_ABORTED,	/* - call aborted locally on error or close */
	RXRPC_CALL_LOCAL_ERROR,		/* - call failed due to local error */
	RXRPC_CALL_NETWORK_ERROR,	/* - call terminated by network error */
	NR__RXRPC_CALL_COMPLETIONS
};

/*
 * Bits in the connection flags.
 */
enum rxrpc_conn_flag {
	RXRPC_CONN_IN_SERVICE_CONNS,	/* Conn is in peer->service_conns */
	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
};

#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
				   (1UL << RXRPC_CONN_FINAL_ACK_3))

/*
 * Events that can be raised upon a connection.
 */
enum rxrpc_conn_event {
	RXRPC_CONN_EV_CHALLENGE,	/* Send challenge packet */
	RXRPC_CONN_EV_ABORT_CALLS,	/* Abort attached calls */
};

/*
 * The connection protocol state.
 */
enum rxrpc_conn_proto_state {
	RXRPC_CONN_UNUSED,		/* Connection not yet attempted */
	RXRPC_CONN_CLIENT_UNSECURED,	/* Client connection needs security init */
	RXRPC_CONN_CLIENT,		/* Client connection */
	RXRPC_CONN_SERVICE_PREALLOC,	/* Service connection preallocation */
	RXRPC_CONN_SERVICE_UNSECURED,	/* Service unsecured connection */
	RXRPC_CONN_SERVICE_CHALLENGING,	/* Service challenging for security */
	RXRPC_CONN_SERVICE,		/* Service secured connection */
	RXRPC_CONN_ABORTED,		/* Conn aborted */
	RXRPC_CONN__NR_STATES
};

/*
 * RxRPC client connection bundle.
 */
struct rxrpc_bundle {
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct key		*key;		/* Security details */
	struct list_head	proc_link;	/* Link in net->bundle_proc_list */
	const struct rxrpc_security *security;	/* applied security module */
	refcount_t		ref;
	atomic_t		active;		/* Number of active users */
	unsigned int		debug_id;
	u32			security_level;	/* Security level selected */
	u16			service_id;	/* Service ID for this connection */
	bool			try_upgrade;	/* True if the bundle is attempting upgrade */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	unsigned short		alloc_error;	/* Error from last conn allocation */
	struct rb_node		local_node;	/* Node in local->client_conns */
	struct list_head	waiting_calls;	/* Calls waiting for channels */
	unsigned long		avail_chans;	/* Mask of available channels */
	unsigned int		conn_ids[4];	/* Connection IDs. */
	struct rxrpc_connection	*conns[4];	/* The connections in the bundle (max 4) */
};
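
/*
 * Illustrative sketch only (the helper name is hypothetical, and this is
 * not the channel allocator itself): avail_chans is a bitmask, so a free
 * channel slot can be found by locating the lowest set bit.
 */
static inline int rxrpc_bundle_pick_free_chan_example(const struct rxrpc_bundle *bundle)
{
	return bundle->avail_chans ? (int)__ffs(bundle->avail_chans) : -1;
}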

/*
 * RxRPC connection definition
 * - matched by { local, peer, epoch, conn_id, direction }
 * - each connection can only handle four simultaneous calls
 */
struct rxrpc_connection {
	struct rxrpc_conn_proto	proto;
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_peer	*peer;		/* Remote endpoint */
	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
	struct key		*key;		/* Security details */
	struct list_head	attend_link;	/* Link in local->conn_attend_q */

	refcount_t		ref;
	atomic_t		active;		/* Active count for service conns */
	struct rcu_head		rcu;
	struct list_head	cache_link;

	unsigned char		act_chans;	/* Mask of active channels */
	struct rxrpc_channel {
		unsigned long		final_ack_at;	/* Time at which to issue final ACK */
		struct rxrpc_call	*call;		/* Active call */
		unsigned int		call_debug_id;	/* call->debug_id */
		u32			call_id;	/* ID of current call */
		u32			call_counter;	/* Call ID counter */
		u32			last_call;	/* ID of last call */
		u8			last_type;	/* Type of last packet */
		union {
			u32		last_seq;
			u32		last_abort;
		};
	} channels[RXRPC_MAXCALLS];

	struct timer_list	timer;		/* Conn event timer */
	struct work_struct	processor;	/* connection event processor */
	struct work_struct	destructor;	/* In-process-context destroyer */
	struct rxrpc_bundle	*bundle;	/* Client connection bundle */
	struct rb_node		service_node;	/* Node in peer->service_conns */
	struct list_head	proc_link;	/* link in procfs list */
	struct list_head	link;		/* link in master connection list */
	struct sk_buff_head	rx_queue;	/* received conn-level packets */

	struct mutex		security_lock;	/* Lock for security management */
	const struct rxrpc_security *security;	/* applied security module */
	union {
		struct {
			struct crypto_sync_skcipher *cipher;	/* encryption handle */
			struct rxrpc_crypt csum_iv;	/* packet checksum base */
			u32	nonce;		/* response re-use preventer */
		} rxkad;
	};
	unsigned long		flags;
	unsigned long		events;
	unsigned long		idle_timestamp;	/* Time at which last became idle */
	spinlock_t		state_lock;	/* state-change lock */
	enum rxrpc_conn_proto_state state;	/* current state of connection */
	enum rxrpc_call_completion completion;	/* Completion condition */
	s32			abort_code;	/* Abort code of connection abort */
	int			debug_id;	/* debug ID for printks */
	rxrpc_serial_t		tx_serial;	/* Outgoing packet serial number counter */
	unsigned int		hi_serial;	/* highest serial number received */
	u32			service_id;	/* Service ID, possibly upgraded */
	u32			security_level;	/* Security level selected */
	u8			security_ix;	/* security type */
	u8			out_clientflag;	/* RXRPC_CLIENT_INITIATED if we are client */
	u8			bundle_shift;	/* Index into bundle->avail_chans */
	bool			exclusive;	/* T if conn is exclusive */
	bool			upgrade;	/* T if service ID can be upgraded */
	u16			orig_service_id; /* Originally requested service ID */
	short			error;		/* Local error code */
};

static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
{
	return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
}

static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
{
	return !rxrpc_to_server(sp);
}

/*
 * Flags in call->flags.
 */
enum rxrpc_call_flag {
	RXRPC_CALL_RELEASED,		/* call has been released - no more message to userspace */
	RXRPC_CALL_HAS_USERID,		/* has a user ID attached */
	RXRPC_CALL_IS_SERVICE,		/* Call is service call */
	RXRPC_CALL_EXPOSED,		/* The call was exposed to the world */
	RXRPC_CALL_RX_LAST,		/* Received the last packet (at rxtx_top) */
	RXRPC_CALL_TX_LAST,		/* Last packet in Tx buffer (at rxtx_top) */
	RXRPC_CALL_TX_ALL_ACKED,	/* Last packet has been hard-acked */
	RXRPC_CALL_SEND_PING,		/* A ping will need to be sent */
	RXRPC_CALL_RETRANS_TIMEOUT,	/* Retransmission due to timeout occurred */
	RXRPC_CALL_BEGAN_RX_TIMER,	/* We began the expect_rx_by timer */
	RXRPC_CALL_RX_HEARD,		/* The peer responded at least once to this call */
	RXRPC_CALL_DISCONNECTED,	/* The call has been disconnected */
	RXRPC_CALL_KERNEL,		/* The call was made by the kernel */
	RXRPC_CALL_UPGRADE,		/* Service upgrade was requested for the call */
	RXRPC_CALL_EXCLUSIVE,		/* The call uses a once-only connection */
	RXRPC_CALL_RX_IS_IDLE,		/* recvmsg() is idle - send an ACK */
	RXRPC_CALL_RECVMSG_READ_ALL,	/* recvmsg() read all of the received data */
};

/*
 * Events that can be raised on a call.
 */
enum rxrpc_call_event {
	RXRPC_CALL_EV_ACK_LOST,		/* ACK may be lost, send ping */
	RXRPC_CALL_EV_INITIAL_PING,	/* Send initial ping for a new service call */
};

/*
 * The states that a call can be in.
 */
enum rxrpc_call_state {
	RXRPC_CALL_UNINITIALISED,
	RXRPC_CALL_CLIENT_AWAIT_CONN,	/* - client waiting for connection to become available */
	RXRPC_CALL_CLIENT_SEND_REQUEST,	/* - client sending request phase */
	RXRPC_CALL_CLIENT_AWAIT_REPLY,	/* - client awaiting reply */
	RXRPC_CALL_CLIENT_RECV_REPLY,	/* - client receiving reply phase */
	RXRPC_CALL_SERVER_PREALLOC,	/* - service preallocation */
	RXRPC_CALL_SERVER_SECURING,	/* - server securing request connection */
	RXRPC_CALL_SERVER_RECV_REQUEST,	/* - server receiving request */
	RXRPC_CALL_SERVER_ACK_REQUEST,	/* - server pending ACK of request */
	RXRPC_CALL_SERVER_SEND_REPLY,	/* - server sending reply */
	RXRPC_CALL_SERVER_AWAIT_ACK,	/* - server awaiting final ACK */
	RXRPC_CALL_COMPLETE,		/* - call complete */
	NR__RXRPC_CALL_STATES
};

/*
 * Call Tx congestion management modes.
 */
enum rxrpc_congest_mode {
	RXRPC_CALL_SLOW_START,
	RXRPC_CALL_CONGEST_AVOIDANCE,
	RXRPC_CALL_PACKET_LOSS,
	RXRPC_CALL_FAST_RETRANSMIT,
	NR__RXRPC_CONGEST_MODES
};

/*
 * RxRPC call definition
 * - matched by { connection, call_id }
 */
struct rxrpc_call {
	struct rcu_head		rcu;
	struct rxrpc_connection	*conn;		/* connection carrying call */
	struct rxrpc_bundle	*bundle;	/* Connection bundle to use */
	struct rxrpc_peer	*peer;		/* Peer record for remote address */
	struct rxrpc_local	*local;		/* Representation of local endpoint */
	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
	struct rxrpc_net	*rxnet;		/* Network namespace to which call belongs */
	struct key		*key;		/* Security details */
	const struct rxrpc_security *security;	/* applied security module */
	struct mutex		user_mutex;	/* User access mutex */
	struct sockaddr_rxrpc	dest_srx;	/* Destination address */
	unsigned long		delay_ack_at;	/* When DELAY ACK needs to happen */
	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
	unsigned long		resend_at;	/* When next resend needs to happen */
	unsigned long		ping_at;	/* When next to send a ping */
	unsigned long		keepalive_at;	/* When next to send a keepalive ping */
	unsigned long		expect_rx_by;	/* When we expect to get a packet by */
	unsigned long		expect_req_by;	/* When we expect to get a request DATA packet by */
	unsigned long		expect_term_by;	/* When we expect call termination by */
	u32			next_rx_timo;	/* Timeout for next Rx packet (jif) */
	u32			next_req_timo;	/* Timeout for next Rx request packet (jif) */
	u32			hard_timo;	/* Maximum lifetime or 0 (jif) */
	struct timer_list	timer;		/* Combined event timer */
	struct work_struct	destroyer;	/* In-process-context destroyer */
	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
	struct list_head	link;		/* link in master call list */
	struct list_head	wait_link;	/* Link in local->new_client_calls */
	struct hlist_node	error_link;	/* link in error distribution list */
	struct list_head	accept_link;	/* Link in rx->acceptq */
	struct list_head	recvmsg_link;	/* Link in rx->recvmsg_q */
	struct list_head	sock_link;	/* Link in rx->sock_calls */
	struct rb_node		sock_node;	/* Node in rx->calls */
	struct list_head	attend_link;	/* Link in local->call_attend_q */
	struct rxrpc_txbuf	*tx_pending;	/* Tx buffer being filled */
	wait_queue_head_t	waitq;		/* Wait queue for channel or Tx */
	s64			tx_total_len;	/* Total length left to be transmitted (or -1) */
	unsigned long		user_call_ID;	/* user-defined call ID */
	unsigned long		flags;
	unsigned long		events;
	spinlock_t		notify_lock;	/* Kernel notification lock */
	unsigned int		send_abort_why;	/* Why the abort [enum rxrpc_abort_reason] */
	s32			send_abort;	/* Abort code to be sent */
	short			send_abort_err;	/* Error to be associated with the abort */
	rxrpc_seq_t		send_abort_seq;	/* DATA packet that incurred the abort (or 0) */
	s32			abort_code;	/* Local/remote abort code */
	int			error;		/* Local error incurred */
	enum rxrpc_call_state	_state;		/* Current state of call (needs barrier) */
	enum rxrpc_call_completion completion;	/* Call completion condition */
	refcount_t		ref;
	u8			security_ix;	/* Security type */
	enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */
	u32			call_id;	/* call ID on connection */
	u32			cid;		/* connection ID plus channel index */
	u32			security_level;	/* Security level selected */
	int			debug_id;	/* debug ID for printks */
	unsigned short		rx_pkt_offset;	/* Current recvmsg packet offset */
	unsigned short		rx_pkt_len;	/* Current recvmsg packet len */

	/* Transmitted data tracking. */
	spinlock_t		tx_lock;	/* Transmit queue lock */
	struct list_head	tx_sendmsg;	/* Sendmsg prepared packets */
	struct list_head	tx_buffer;	/* Buffer of transmissible packets */
	rxrpc_seq_t		tx_bottom;	/* First packet in buffer */
	rxrpc_seq_t		tx_transmitted;	/* Highest packet transmitted */
	rxrpc_seq_t		tx_prepared;	/* Highest Tx slot prepared. */
	rxrpc_seq_t		tx_top;		/* Highest Tx slot allocated. */
	u16			tx_backoff;	/* Delay to insert due to Tx failure */
	u8			tx_winsize;	/* Maximum size of Tx window */
#define RXRPC_TX_MAX_WINDOW	128
	ktime_t			tx_last_sent;	/* Last time a transmission occurred */

	/* Received data tracking */
	struct sk_buff_head	recvmsg_queue;	/* Queue of packets ready for recvmsg() */
	struct sk_buff_head	rx_oos_queue;	/* Queue of out of sequence packets */

	rxrpc_seq_t		rx_highest_seq;	/* Highest sequence number received */
	rxrpc_seq_t		rx_consumed;	/* Highest packet consumed */
	rxrpc_serial_t		rx_serial;	/* Highest serial received for this call */
	u8			rx_winsize;	/* Size of Rx window */

	/* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
	 * is fixed, we keep these numbers in terms of segments (ie. DATA
	 * packets) rather than bytes.
	 */
#define RXRPC_TX_SMSS		RXRPC_JUMBO_DATALEN
#define RXRPC_MIN_CWND		(RXRPC_TX_SMSS > 2190 ? 2 : RXRPC_TX_SMSS > 1095 ? 3 : 4)
	u8			cong_cwnd;	/* Congestion window size */
	u8			cong_extra;	/* Extra to send for congestion management */
	u8			cong_ssthresh;	/* Slow-start threshold */
	enum rxrpc_congest_mode	cong_mode:8;	/* Congestion management mode */
	u8			cong_dup_acks;	/* Count of ACKs showing missing packets */
	u8			cong_cumul_acks; /* Cumulative ACK count */
	ktime_t			cong_tstamp;	/* Last time cwnd was changed */
	struct sk_buff		*cong_last_nack; /* Last ACK with nacks received */

	/* Receive-phase ACK management (ACKs we send). */
	u8			ackr_reason;	/* reason to ACK */
	u16			ackr_sack_base;	/* Starting slot in SACK table ring */
	rxrpc_seq_t		ackr_window;	/* Base of SACK window */
	rxrpc_seq_t		ackr_wtop;	/* Top of SACK window */
	unsigned int		ackr_nr_unacked; /* Number of unacked packets */
	atomic_t		ackr_nr_consumed; /* Number of packets needing hard ACK */
	struct {
#define RXRPC_SACK_SIZE 256
		/* SACK table for soft-acked packets */
		u8		ackr_sack_table[RXRPC_SACK_SIZE];
	} __aligned(8);

	/* RTT management */
	rxrpc_serial_t		rtt_serial[4];	/* Serial number of DATA or PING sent */
	ktime_t			rtt_sent_at[4];	/* Time packet sent */
	unsigned long		rtt_avail;	/* Mask of available slots in bits 0-3,
						 * Mask of pending samples in 8-11 */
#define RXRPC_CALL_RTT_AVAIL_MASK	0xf
#define RXRPC_CALL_RTT_PEND_SHIFT	8

	/* Transmission-phase ACK management (ACKs we've received). */
	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
	rxrpc_seq_t		acks_first_seq;	/* first sequence number received */
	rxrpc_seq_t		acks_prev_seq;	/* Highest previousPacket received */
	rxrpc_seq_t		acks_hard_ack;	/* Latest hard-ack point */
	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
	rxrpc_serial_t		acks_highest_serial; /* Highest serial number ACK'd */
};

/*
 * Summary of a new ACK and the changes it made to the Tx buffer packet states.
 */
struct rxrpc_ack_summary {
	u16			nr_acks;		/* Number of ACKs in packet */
	u16			nr_new_acks;		/* Number of new ACKs in packet */
	u16			nr_new_nacks;		/* Number of new nacks in packet */
	u16			nr_retained_nacks;	/* Number of nacks retained between ACKs */
	u8			ack_reason;
	bool			saw_nacks;		/* Saw NACKs in packet */
	bool			new_low_nack;		/* T if new low NACK found */
	bool			retrans_timeo;		/* T if reTx due to timeout happened */
	u8			flight_size;		/* Number of unreceived transmissions */
	/* Place to stash values for tracing */
	enum rxrpc_congest_mode	mode:8;
	u8			cwnd;
	u8			ssthresh;
	u8			dup_acks;
	u8			cumulative_acks;
};

/*
 * sendmsg() cmsg-specified parameters.
 */
enum rxrpc_command {
	RXRPC_CMD_SEND_DATA,		/* send data message */
	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
	RXRPC_CMD_CHARGE_ACCEPT,	/* [server] charge accept preallocation */
};

struct rxrpc_call_params {
	s64			tx_total_len;	/* Total Tx data length (if send data) */
	unsigned long		user_call_ID;	/* User's call ID */
	struct {
		u32		hard;		/* Maximum lifetime (sec) */
		u32		idle;		/* Max time since last data packet (msec) */
		u32		normal;		/* Max time since last call packet (msec) */
	} timeouts;
	u8			nr_timeouts;	/* Number of timeouts specified */
	bool			kernel;		/* T if kernel is making the call */
	enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
};

struct rxrpc_send_params {
	struct rxrpc_call_params call;
	u32			abort_code;	/* Abort code to Tx (if abort) */
	enum rxrpc_command	command : 8;	/* The command to implement */
	bool			exclusive;	/* Shared or exclusive call */
	bool			upgrade;	/* If the connection is upgradeable */
};

/*
 * Buffer of data to be output as a packet.
 */
struct rxrpc_txbuf {
	struct rcu_head		rcu;
	struct list_head	call_link;	/* Link in call->tx_sendmsg/tx_buffer */
	struct list_head	tx_link;	/* Link in live Enc queue or Tx queue */
	ktime_t			last_sent;	/* Time at which last transmitted */
	refcount_t		ref;
	rxrpc_seq_t		seq;		/* Sequence number of this packet */
	unsigned int		call_debug_id;
	unsigned int		debug_id;
	unsigned int		len;		/* Amount of data in buffer */
	unsigned int		space;		/* Remaining data space */
	unsigned int		offset;		/* Offset of fill point */
	unsigned long		flags;
#define RXRPC_TXBUF_LAST	0		/* Set if last packet in Tx phase */
#define RXRPC_TXBUF_RESENT	1		/* Set if has been resent */
	u8 /*enum rxrpc_propose_ack_trace*/ ack_why;	/* If ack, why */
	struct {
		/* The packet for encrypting and DMA'ing.  We align it such
		 * that data[] aligns correctly for any crypto blocksize.
		 */
		u8		pad[64 - sizeof(struct rxrpc_wire_header)];
		struct rxrpc_wire_header wire;	/* Network-ready header */
		union {
			u8	data[RXRPC_JUMBO_DATALEN]; /* Data packet */
			struct {
				struct rxrpc_ackpacket ack;
				DECLARE_FLEX_ARRAY(u8, acks);
			};
		};
	} __aligned(64);
};

static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb)
{
	return txb->wire.flags & RXRPC_CLIENT_INITIATED;
}

static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb)
{
	return !rxrpc_sending_to_server(txb);
}

#include <trace/events/rxrpc.h>

/*
 * Allocate the next serial number on a connection.  0 must be skipped.
 */
static inline rxrpc_serial_t rxrpc_get_next_serial(struct rxrpc_connection *conn)
{
	rxrpc_serial_t serial;

	serial = conn->tx_serial;
	if (serial == 0)
		serial = 1;
	conn->tx_serial = serial + 1;
	return serial;
}
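
/*
 * Worked example of the wrap handling above (illustrative only): once
 * conn->tx_serial has reached 0xffffffff, that value is handed out and
 * tx_serial wraps to 0; the next caller then sees 0, substitutes 1 and
 * leaves tx_serial at 2, so serial number 0 is never allocated.
 */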

/*
 * af_rxrpc.c
 */
extern atomic_t rxrpc_n_rx_skbs;
extern struct workqueue_struct *rxrpc_workqueue;

/*
 * call_accept.c
 */
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
bool rxrpc_new_incoming_call(struct rxrpc_local *local,
			     struct rxrpc_peer *peer,
			     struct rxrpc_connection *conn,
			     struct sockaddr_rxrpc *peer_srx,
			     struct sk_buff *skb);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);

/*
 * call_event.c
 */
void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial,
			enum rxrpc_propose_ack_trace why);
void rxrpc_send_ACK(struct rxrpc_call *, u8, rxrpc_serial_t, enum rxrpc_propose_ack_trace);
void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t,
			     enum rxrpc_propose_ack_trace);
void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *);
void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb);

void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why);

bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);

/*
 * call_object.c
 */
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern struct kmem_cache *rxrpc_call_jar;

void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what);
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
					 struct rxrpc_conn_parameters *,
					 struct rxrpc_call_params *, gfp_t,
					 unsigned int);
void rxrpc_start_call_timer(struct rxrpc_call *call);
void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
			 struct sk_buff *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void rxrpc_see_call(struct rxrpc_call *, enum rxrpc_call_trace);
struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_cleanup_call(struct rxrpc_call *);
void rxrpc_destroy_all_calls(struct rxrpc_net *);

static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
	return !rxrpc_is_service_call(call);
}

/*
 * call_state.c
 */
bool rxrpc_set_call_completion(struct rxrpc_call *call,
			       enum rxrpc_call_completion compl,
			       u32 abort_code,
			       int error);
bool rxrpc_call_completed(struct rxrpc_call *call);
bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
		      u32 abort_code, int error, enum rxrpc_abort_reason why);
void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
			int error);

static inline void rxrpc_set_call_state(struct rxrpc_call *call,
					enum rxrpc_call_state state)
{
	/* Order write of completion info before write of ->state. */
	smp_store_release(&call->_state, state);
	wake_up(&call->waitq);
}

static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call)
{
	return call->_state; /* Only inside I/O thread */
}

static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call)
{
	return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
}

static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call)
{
	/* Order read ->state before read of completion info. */
	return smp_load_acquire(&call->_state);
}

static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call)
{
	return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
}

static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call)
{
	return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED;
}

/*
 * conn_client.c
 */
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;

void rxrpc_purge_client_connections(struct rxrpc_local *local);
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp);
void rxrpc_connect_client_calls(struct rxrpc_local *local);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local);
void rxrpc_clean_up_local_conns(struct rxrpc_local *);

/*
 * conn_event.c
 */
void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb,
				unsigned int channel);
int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
		     s32 abort_code, int err, enum rxrpc_abort_reason why);
void rxrpc_process_connection(struct work_struct *);
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb);

static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn)
{
	/* Order reading the abort info after the state check. */
	return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED;
}
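
/*
 * Illustrative pairing only (an editorial note): the acquire above is
 * meant to pair with a release on the aborting side, so that abort
 * details written before the state change are visible once the state
 * reads as aborted, e.g.:
 *
 *	writer:	conn->abort_code = abort_code;
 *		smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
 *
 *	reader:	if (rxrpc_is_conn_aborted(conn))
 *			code = conn->abort_code;
 */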

/*
 * conn_object.c
 */
extern unsigned int rxrpc_connection_expiry;
extern unsigned int rxrpc_closed_conn_expiry;

void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why);
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t);
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *,
							   struct sockaddr_rxrpc *,
							   struct sk_buff *);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_client_conn(struct rxrpc_connection *);
void rxrpc_queue_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_see_connection(struct rxrpc_connection *, enum rxrpc_conn_trace);
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *,
					      enum rxrpc_conn_trace);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *,
						    enum rxrpc_conn_trace);
void rxrpc_put_connection(struct rxrpc_connection *, enum rxrpc_conn_trace);
void rxrpc_service_connection_reaper(struct work_struct *);
void rxrpc_destroy_all_connections(struct rxrpc_net *);

static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn)
{
	return conn->out_clientflag;
}

static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn)
{
	return !rxrpc_conn_is_client(conn);
}

static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
					   unsigned long expire_at)
{
	timer_reduce(&conn->timer, expire_at);
}

/*
 * conn_service.c
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
						     struct sk_buff *);
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
				   const struct rxrpc_security *, struct sk_buff *);
void rxrpc_unpublish_service_conn(struct rxrpc_connection *);

/*
 * input.c
 */
void rxrpc_congestion_degrade(struct rxrpc_call *);
void rxrpc_input_call_packet(struct rxrpc_call *, struct sk_buff *);
void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);

/*
 * io_thread.c
 */
int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
void rxrpc_error_report(struct sock *);
bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
			s32 abort_code, int err);
int rxrpc_io_thread(void *data);
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
{
	wake_up_process(local->io_thread);
}

static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
{
	return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO);
}

/*
 * insecure.c
 */
extern const struct rxrpc_security rxrpc_no_security;

/*
 * key.c
 */
extern struct key_type key_type_rxrpc;

int rxrpc_request_key(struct rxrpc_sock *, sockptr_t, int);
int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
			      u32);

/*
 * local_event.c
 */
void rxrpc_gen_version_string(void);
void rxrpc_send_version_request(struct rxrpc_local *local,
				struct rxrpc_host_header *hdr,
				struct sk_buff *skb);

/*
 * local_object.c
 */
void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set);
struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *);
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *, enum rxrpc_local_trace);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_put_local(struct rxrpc_local *, enum rxrpc_local_trace);
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_unuse_local(struct rxrpc_local *, enum rxrpc_local_trace);
void rxrpc_destroy_local(struct rxrpc_local *local);
void rxrpc_destroy_all_locals(struct rxrpc_net *);

static inline bool __rxrpc_use_local(struct rxrpc_local *local,
				     enum rxrpc_local_trace why)
{
	int r, u;

	r = refcount_read(&local->ref);
	u = atomic_fetch_add_unless(&local->active_users, 1, 0);
	trace_rxrpc_local(local->debug_id, why, r, u);
	return u != 0;
}

static inline void rxrpc_see_local(struct rxrpc_local *local,
				   enum rxrpc_local_trace why)
{
	int r, u;

	r = refcount_read(&local->ref);
	u = atomic_read(&local->active_users);
	trace_rxrpc_local(local->debug_id, why, r, u);
}

/*
 * misc.c
 */
extern unsigned int rxrpc_max_backlog __read_mostly;
extern unsigned long rxrpc_soft_ack_delay;
extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
#ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY
extern unsigned long rxrpc_inject_rx_delay;
#endif

/*
 * net_ns.c
 */
extern unsigned int rxrpc_net_id;
extern struct pernet_operations rxrpc_net_ops;

static inline struct rxrpc_net *rxrpc_net(struct net *net)
{
	return net_generic(net, rxrpc_net_id);
}

/*
 * output.c
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
void rxrpc_send_keepalive(struct rxrpc_peer *);
void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);

/*
 * peer_event.c
 */
void rxrpc_input_error(struct rxrpc_local *, struct sk_buff *);
void rxrpc_peer_keepalive_worker(struct work_struct *);

/*
 * peer_object.c
 */
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
					 const struct sockaddr_rxrpc *);
struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
				     struct sockaddr_rxrpc *srx, gfp_t gfp);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
				    enum rxrpc_peer_trace);
void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);

/*
 * proc.c
 */
extern const struct seq_operations rxrpc_call_seq_ops;
extern const struct seq_operations rxrpc_connection_seq_ops;
extern const struct seq_operations rxrpc_bundle_seq_ops;
extern const struct seq_operations rxrpc_peer_seq_ops;
extern const struct seq_operations rxrpc_local_seq_ops;

/*
 * recvmsg.c
 */
void rxrpc_notify_socket(struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
 * Abort a call due to a protocol error.
 */
static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
				     struct sk_buff *skb,
				     s32 abort_code,
				     enum rxrpc_abort_reason why)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why);
	return -EPROTO;
}

/*
 * rtt.c
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
			rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);

/*
 * rxkad.c
 */
#ifdef CONFIG_RXKAD
extern const struct rxrpc_security rxkad;
#endif

/*
 * security.c
 */
int __init rxrpc_init_security(void);
const struct rxrpc_security *rxrpc_security_lookup(u8);
void rxrpc_exit_security(void);
int rxrpc_init_client_call_security(struct rxrpc_call *);
int rxrpc_init_client_conn_security(struct rxrpc_connection *);
const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *,
							 struct sk_buff *);
struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
					  struct sk_buff *, u32, u32);

/*
 * sendmsg.c
 */
bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
			 enum rxrpc_abort_reason why);
int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);

/*
 * server_key.c
 */
extern struct key_type key_type_rxrpc_s;

int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);

/*
 * skbuff.c
 */
void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace);
void rxrpc_purge_queue(struct sk_buff_head *);

/*
 * stats.c
 */
int rxrpc_stats_show(struct seq_file *seq, void *v);
int rxrpc_stats_clear(struct file *file, char *buf, size_t size);

#define rxrpc_inc_stat(rxnet, s) atomic_inc(&(rxnet)->s)
#define rxrpc_dec_stat(rxnet, s) atomic_dec(&(rxnet)->s)

/*
 * sysctl.c
 */
#ifdef CONFIG_SYSCTL
extern int __init rxrpc_sysctl_init(void);
extern void rxrpc_sysctl_exit(void);
#else
static inline int __init rxrpc_sysctl_init(void) { return 0; }
static inline void rxrpc_sysctl_exit(void) {}
#endif

/*
 * txbuf.c
 */
extern atomic_t rxrpc_nr_txbuf;
struct rxrpc_txbuf *rxrpc_alloc_txbuf(struct rxrpc_call *call, u8 packet_type,
				      gfp_t gfp);
void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);
void rxrpc_see_txbuf(struct rxrpc_txbuf *txb,
		     enum rxrpc_txbuf_trace what);
void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what);

/*
 * utils.c
 */
int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *);

static inline bool before(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) < 0;
}
static inline bool before_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) <= 0;
}
static inline bool after(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) > 0;
}
static inline bool after_eq(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
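
/*
 * Worked example of the wrap-safe comparisons above (illustrative only):
 * before(0xfffffffe, 1) evaluates (s32)(0xfffffffe - 1) = (s32)0xfffffffd,
 * i.e. -3, which is negative, so a sequence number just below the 32-bit
 * wrap point still compares as "before" a small post-wrap sequence number.
 */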

/*
 * debug tracing
 */
extern unsigned int rxrpc_debug;

#define dbgprintk(FMT,...) \
	printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)

#define kenter(FMT,...)	dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...)	dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...)	dbgprintk("    "FMT ,##__VA_ARGS__)


#if defined(__KDEBUG)
#define _enter(FMT,...)	kenter(FMT,##__VA_ARGS__)
#define _leave(FMT,...)	kleave(FMT,##__VA_ARGS__)
#define _debug(FMT,...)	kdebug(FMT,##__VA_ARGS__)

#elif defined(CONFIG_AF_RXRPC_DEBUG)
#define RXRPC_DEBUG_KENTER	0x01
#define RXRPC_DEBUG_KLEAVE	0x02
#define RXRPC_DEBUG_KDEBUG	0x04

#define _enter(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER))	\
		kenter(FMT,##__VA_ARGS__);		\
} while (0)

#define _leave(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE))	\
		kleave(FMT,##__VA_ARGS__);		\
} while (0)

#define _debug(FMT,...)					\
do {							\
	if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG))	\
		kdebug(FMT,##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT,...)	no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define _leave(FMT,...)	no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define _debug(FMT,...)	no_printk("    "FMT ,##__VA_ARGS__)
#endif

/*
 * debug assertion checking
 */
#if 1 // defined(__KDEBUGALL)

#define ASSERT(X)						\
do {								\
	if (unlikely(!(X))) {					\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely(!(_x OP _y))) {					\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)						\
do {								\
	if (unlikely((C) && !(X))) {				\
		pr_err("Assertion failed\n");			\
		BUG();						\
	}							\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	__typeof__(X) _x = (X);						\
	__typeof__(Y) _y = (__typeof__(X))(Y);				\
	if (unlikely((C) && !(_x OP _y))) {				\
		pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \
		       (unsigned long)_x, (unsigned long)_x, #OP,	\
		       (unsigned long)_y, (unsigned long)_y);		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)				\
do {						\
} while (0)

#define ASSERTCMP(X, OP, Y)			\
do {						\
} while (0)

#define ASSERTIF(C, X)				\
do {						\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)		\
do {						\
} while (0)

#endif /* __KDEBUGALL */