1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* /proc/net/ support for AF_RXRPC 3 * 4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 5 * Written by David Howells (dhowells@redhat.com) 6 */ 7 8 #include <linux/module.h> 9 #include <net/sock.h> 10 #include <net/af_rxrpc.h> 11 #include "ar-internal.h" 12 13 static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = { 14 [RXRPC_CONN_UNUSED] = "Unused ", 15 [RXRPC_CONN_CLIENT_UNSECURED] = "ClUnsec ", 16 [RXRPC_CONN_CLIENT] = "Client ", 17 [RXRPC_CONN_SERVICE_PREALLOC] = "SvPrealc", 18 [RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ", 19 [RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ", 20 [RXRPC_CONN_SERVICE] = "SvSecure", 21 [RXRPC_CONN_ABORTED] = "Aborted ", 22 }; 23 24 /* 25 * generate a list of extant and dead calls in /proc/net/rxrpc_calls 26 */ 27 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos) 28 __acquires(rcu) 29 { 30 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 31 32 rcu_read_lock(); 33 return seq_list_start_head_rcu(&rxnet->calls, *_pos); 34 } 35 36 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos) 37 { 38 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 39 40 return seq_list_next_rcu(v, &rxnet->calls, pos); 41 } 42 43 static void rxrpc_call_seq_stop(struct seq_file *seq, void *v) 44 __releases(rcu) 45 { 46 rcu_read_unlock(); 47 } 48 49 static int rxrpc_call_seq_show(struct seq_file *seq, void *v) 50 { 51 struct rxrpc_local *local; 52 struct rxrpc_call *call; 53 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 54 enum rxrpc_call_state state; 55 unsigned long timeout = 0; 56 rxrpc_seq_t acks_hard_ack; 57 char lbuff[50], rbuff[50]; 58 59 if (v == &rxnet->calls) { 60 seq_puts(seq, 61 "Proto Local " 62 " Remote " 63 " SvID ConnID CallID End Use State Abort " 64 " DebugId TxSeq TW RxSeq RW RxSerial CW RxTimo\n"); 65 return 0; 66 } 67 68 call = list_entry(v, struct rxrpc_call, link); 69 70 local = call->local; 71 if (local) 72 sprintf(lbuff, "%pISpc", &local->srx.transport); 73 else 74 strcpy(lbuff, "no_local"); 75 76 sprintf(rbuff, "%pISpc", &call->dest_srx.transport); 77 78 state = rxrpc_call_state(call); 79 if (state != RXRPC_CALL_SERVER_PREALLOC) { 80 timeout = READ_ONCE(call->expect_rx_by); 81 timeout -= jiffies; 82 } 83 84 acks_hard_ack = READ_ONCE(call->acks_hard_ack); 85 seq_printf(seq, 86 "UDP %-47.47s %-47.47s %4x %08x %08x %s %3u" 87 " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n", 88 lbuff, 89 rbuff, 90 call->dest_srx.srx_service, 91 call->cid, 92 call->call_id, 93 rxrpc_is_service_call(call) ? "Svc" : "Clt", 94 refcount_read(&call->ref), 95 rxrpc_call_states[state], 96 call->abort_code, 97 call->debug_id, 98 acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack, 99 call->ackr_window, call->ackr_wtop - call->ackr_window, 100 call->rx_serial, 101 call->cong_cwnd, 102 timeout); 103 104 return 0; 105 } 106 107 const struct seq_operations rxrpc_call_seq_ops = { 108 .start = rxrpc_call_seq_start, 109 .next = rxrpc_call_seq_next, 110 .stop = rxrpc_call_seq_stop, 111 .show = rxrpc_call_seq_show, 112 }; 113 114 /* 115 * generate a list of extant virtual connections in /proc/net/rxrpc_conns 116 */ 117 static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos) 118 __acquires(rxnet->conn_lock) 119 { 120 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 121 122 read_lock(&rxnet->conn_lock); 123 return seq_list_start_head(&rxnet->conn_proc_list, *_pos); 124 } 125 126 static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v, 127 loff_t *pos) 128 { 129 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 130 131 return seq_list_next(v, &rxnet->conn_proc_list, pos); 132 } 133 134 static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v) 135 __releases(rxnet->conn_lock) 136 { 137 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 138 139 read_unlock(&rxnet->conn_lock); 140 } 141 142 static int rxrpc_connection_seq_show(struct seq_file *seq, void *v) 143 { 144 struct rxrpc_connection *conn; 145 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 146 const char *state; 147 char lbuff[50], rbuff[50]; 148 149 if (v == &rxnet->conn_proc_list) { 150 seq_puts(seq, 151 "Proto Local " 152 " Remote " 153 " SvID ConnID End Ref Act State Key " 154 " Serial ISerial CallId0 CallId1 CallId2 CallId3\n" 155 ); 156 return 0; 157 } 158 159 conn = list_entry(v, struct rxrpc_connection, proc_link); 160 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) { 161 strcpy(lbuff, "no_local"); 162 strcpy(rbuff, "no_connection"); 163 goto print; 164 } 165 166 sprintf(lbuff, "%pISpc", &conn->local->srx.transport); 167 sprintf(rbuff, "%pISpc", &conn->peer->srx.transport); 168 print: 169 state = rxrpc_is_conn_aborted(conn) ? 170 rxrpc_call_completions[conn->completion] : 171 rxrpc_conn_states[conn->state]; 172 seq_printf(seq, 173 "UDP %-47.47s %-47.47s %4x %08x %s %3u %3d" 174 " %s %08x %08x %08x %08x %08x %08x %08x\n", 175 lbuff, 176 rbuff, 177 conn->service_id, 178 conn->proto.cid, 179 rxrpc_conn_is_service(conn) ? "Svc" : "Clt", 180 refcount_read(&conn->ref), 181 atomic_read(&conn->active), 182 state, 183 key_serial(conn->key), 184 atomic_read(&conn->serial), 185 conn->hi_serial, 186 conn->channels[0].call_id, 187 conn->channels[1].call_id, 188 conn->channels[2].call_id, 189 conn->channels[3].call_id); 190 191 return 0; 192 } 193 194 const struct seq_operations rxrpc_connection_seq_ops = { 195 .start = rxrpc_connection_seq_start, 196 .next = rxrpc_connection_seq_next, 197 .stop = rxrpc_connection_seq_stop, 198 .show = rxrpc_connection_seq_show, 199 }; 200 201 /* 202 * generate a list of extant virtual bundles in /proc/net/rxrpc/bundles 203 */ 204 static void *rxrpc_bundle_seq_start(struct seq_file *seq, loff_t *_pos) 205 __acquires(rxnet->conn_lock) 206 { 207 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 208 209 read_lock(&rxnet->conn_lock); 210 return seq_list_start_head(&rxnet->bundle_proc_list, *_pos); 211 } 212 213 static void *rxrpc_bundle_seq_next(struct seq_file *seq, void *v, 214 loff_t *pos) 215 { 216 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 217 218 return seq_list_next(v, &rxnet->bundle_proc_list, pos); 219 } 220 221 static void rxrpc_bundle_seq_stop(struct seq_file *seq, void *v) 222 __releases(rxnet->conn_lock) 223 { 224 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 225 226 read_unlock(&rxnet->conn_lock); 227 } 228 229 static int rxrpc_bundle_seq_show(struct seq_file *seq, void *v) 230 { 231 struct rxrpc_bundle *bundle; 232 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 233 char lbuff[50], rbuff[50]; 234 235 if (v == &rxnet->bundle_proc_list) { 236 seq_puts(seq, 237 "Proto Local " 238 " Remote " 239 " SvID Ref Act Flg Key |" 240 " Bundle Conn_0 Conn_1 Conn_2 Conn_3\n" 241 ); 242 return 0; 243 } 244 245 bundle = list_entry(v, struct rxrpc_bundle, proc_link); 246 247 sprintf(lbuff, "%pISpc", &bundle->local->srx.transport); 248 sprintf(rbuff, "%pISpc", &bundle->peer->srx.transport); 249 seq_printf(seq, 250 "UDP %-47.47s %-47.47s %4x %3u %3d" 251 " %c%c%c %08x | %08x %08x %08x %08x %08x\n", 252 lbuff, 253 rbuff, 254 bundle->service_id, 255 refcount_read(&bundle->ref), 256 atomic_read(&bundle->active), 257 bundle->try_upgrade ? 'U' : '-', 258 bundle->exclusive ? 'e' : '-', 259 bundle->upgrade ? 'u' : '-', 260 key_serial(bundle->key), 261 bundle->debug_id, 262 bundle->conn_ids[0], 263 bundle->conn_ids[1], 264 bundle->conn_ids[2], 265 bundle->conn_ids[3]); 266 267 return 0; 268 } 269 270 const struct seq_operations rxrpc_bundle_seq_ops = { 271 .start = rxrpc_bundle_seq_start, 272 .next = rxrpc_bundle_seq_next, 273 .stop = rxrpc_bundle_seq_stop, 274 .show = rxrpc_bundle_seq_show, 275 }; 276 277 /* 278 * generate a list of extant virtual peers in /proc/net/rxrpc/peers 279 */ 280 static int rxrpc_peer_seq_show(struct seq_file *seq, void *v) 281 { 282 struct rxrpc_peer *peer; 283 time64_t now; 284 char lbuff[50], rbuff[50]; 285 286 if (v == SEQ_START_TOKEN) { 287 seq_puts(seq, 288 "Proto Local " 289 " Remote " 290 " Use SST MTU LastUse RTT RTO\n" 291 ); 292 return 0; 293 } 294 295 peer = list_entry(v, struct rxrpc_peer, hash_link); 296 297 sprintf(lbuff, "%pISpc", &peer->local->srx.transport); 298 299 sprintf(rbuff, "%pISpc", &peer->srx.transport); 300 301 now = ktime_get_seconds(); 302 seq_printf(seq, 303 "UDP %-47.47s %-47.47s %3u" 304 " %3u %5u %6llus %8u %8u\n", 305 lbuff, 306 rbuff, 307 refcount_read(&peer->ref), 308 peer->cong_ssthresh, 309 peer->mtu, 310 now - peer->last_tx_at, 311 peer->srtt_us >> 3, 312 jiffies_to_usecs(peer->rto_j)); 313 314 return 0; 315 } 316 317 static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos) 318 __acquires(rcu) 319 { 320 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 321 unsigned int bucket, n; 322 unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash); 323 void *p; 324 325 rcu_read_lock(); 326 327 if (*_pos >= UINT_MAX) 328 return NULL; 329 330 n = *_pos & ((1U << shift) - 1); 331 bucket = *_pos >> shift; 332 for (;;) { 333 if (bucket >= HASH_SIZE(rxnet->peer_hash)) { 334 *_pos = UINT_MAX; 335 return NULL; 336 } 337 if (n == 0) { 338 if (bucket == 0) 339 return SEQ_START_TOKEN; 340 *_pos += 1; 341 n++; 342 } 343 344 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); 345 if (p) 346 return p; 347 bucket++; 348 n = 1; 349 *_pos = (bucket << shift) | n; 350 } 351 } 352 353 static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos) 354 { 355 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 356 unsigned int bucket, n; 357 unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash); 358 void *p; 359 360 if (*_pos >= UINT_MAX) 361 return NULL; 362 363 bucket = *_pos >> shift; 364 365 p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos); 366 if (p) 367 return p; 368 369 for (;;) { 370 bucket++; 371 n = 1; 372 *_pos = (bucket << shift) | n; 373 374 if (bucket >= HASH_SIZE(rxnet->peer_hash)) { 375 *_pos = UINT_MAX; 376 return NULL; 377 } 378 if (n == 0) { 379 *_pos += 1; 380 n++; 381 } 382 383 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1); 384 if (p) 385 return p; 386 } 387 } 388 389 static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v) 390 __releases(rcu) 391 { 392 rcu_read_unlock(); 393 } 394 395 396 const struct seq_operations rxrpc_peer_seq_ops = { 397 .start = rxrpc_peer_seq_start, 398 .next = rxrpc_peer_seq_next, 399 .stop = rxrpc_peer_seq_stop, 400 .show = rxrpc_peer_seq_show, 401 }; 402 403 /* 404 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals 405 */ 406 static int rxrpc_local_seq_show(struct seq_file *seq, void *v) 407 { 408 struct rxrpc_local *local; 409 char lbuff[50]; 410 411 if (v == SEQ_START_TOKEN) { 412 seq_puts(seq, 413 "Proto Local " 414 " Use Act RxQ\n"); 415 return 0; 416 } 417 418 local = hlist_entry(v, struct rxrpc_local, link); 419 420 sprintf(lbuff, "%pISpc", &local->srx.transport); 421 422 seq_printf(seq, 423 "UDP %-47.47s %3u %3u %3u\n", 424 lbuff, 425 refcount_read(&local->ref), 426 atomic_read(&local->active_users), 427 local->rx_queue.qlen); 428 429 return 0; 430 } 431 432 static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos) 433 __acquires(rcu) 434 { 435 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 436 unsigned int n; 437 438 rcu_read_lock(); 439 440 if (*_pos >= UINT_MAX) 441 return NULL; 442 443 n = *_pos; 444 if (n == 0) 445 return SEQ_START_TOKEN; 446 447 return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1); 448 } 449 450 static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos) 451 { 452 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 453 454 if (*_pos >= UINT_MAX) 455 return NULL; 456 457 return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos); 458 } 459 460 static void rxrpc_local_seq_stop(struct seq_file *seq, void *v) 461 __releases(rcu) 462 { 463 rcu_read_unlock(); 464 } 465 466 const struct seq_operations rxrpc_local_seq_ops = { 467 .start = rxrpc_local_seq_start, 468 .next = rxrpc_local_seq_next, 469 .stop = rxrpc_local_seq_stop, 470 .show = rxrpc_local_seq_show, 471 }; 472 473 /* 474 * Display stats in /proc/net/rxrpc/stats 475 */ 476 int rxrpc_stats_show(struct seq_file *seq, void *v) 477 { 478 struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq)); 479 480 seq_printf(seq, 481 "Data : send=%u sendf=%u fail=%u\n", 482 atomic_read(&rxnet->stat_tx_data_send), 483 atomic_read(&rxnet->stat_tx_data_send_frag), 484 atomic_read(&rxnet->stat_tx_data_send_fail)); 485 seq_printf(seq, 486 "Data-Tx : nr=%u retrans=%u uf=%u cwr=%u\n", 487 atomic_read(&rxnet->stat_tx_data), 488 atomic_read(&rxnet->stat_tx_data_retrans), 489 atomic_read(&rxnet->stat_tx_data_underflow), 490 atomic_read(&rxnet->stat_tx_data_cwnd_reset)); 491 seq_printf(seq, 492 "Data-Rx : nr=%u reqack=%u jumbo=%u\n", 493 atomic_read(&rxnet->stat_rx_data), 494 atomic_read(&rxnet->stat_rx_data_reqack), 495 atomic_read(&rxnet->stat_rx_data_jumbo)); 496 seq_printf(seq, 497 "Ack : fill=%u send=%u skip=%u\n", 498 atomic_read(&rxnet->stat_tx_ack_fill), 499 atomic_read(&rxnet->stat_tx_ack_send), 500 atomic_read(&rxnet->stat_tx_ack_skip)); 501 seq_printf(seq, 502 "Ack-Tx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n", 503 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_REQUESTED]), 504 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DUPLICATE]), 505 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]), 506 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_EXCEEDS_WINDOW]), 507 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_NOSPACE]), 508 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING]), 509 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING_RESPONSE]), 510 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DELAY]), 511 atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_IDLE])); 512 seq_printf(seq, 513 "Ack-Rx : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n", 514 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]), 515 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]), 516 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]), 517 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_EXCEEDS_WINDOW]), 518 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_NOSPACE]), 519 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING]), 520 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]), 521 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DELAY]), 522 atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE])); 523 seq_printf(seq, 524 "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n", 525 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]), 526 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_already_on]), 527 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]), 528 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt])); 529 seq_printf(seq, 530 "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n", 531 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]), 532 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_retrans]), 533 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]), 534 atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin])); 535 seq_printf(seq, 536 "Buffers : txb=%u rxb=%u\n", 537 atomic_read(&rxrpc_nr_txbuf), 538 atomic_read(&rxrpc_n_rx_skbs)); 539 seq_printf(seq, 540 "IO-thread: loops=%u\n", 541 atomic_read(&rxnet->stat_io_loop)); 542 return 0; 543 } 544 545 /* 546 * Clear stats if /proc/net/rxrpc/stats is written to. 547 */ 548 int rxrpc_stats_clear(struct file *file, char *buf, size_t size) 549 { 550 struct seq_file *m = file->private_data; 551 struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(m)); 552 553 if (size > 1 || (size == 1 && buf[0] != '\n')) 554 return -EINVAL; 555 556 atomic_set(&rxnet->stat_tx_data, 0); 557 atomic_set(&rxnet->stat_tx_data_retrans, 0); 558 atomic_set(&rxnet->stat_tx_data_underflow, 0); 559 atomic_set(&rxnet->stat_tx_data_cwnd_reset, 0); 560 atomic_set(&rxnet->stat_tx_data_send, 0); 561 atomic_set(&rxnet->stat_tx_data_send_frag, 0); 562 atomic_set(&rxnet->stat_tx_data_send_fail, 0); 563 atomic_set(&rxnet->stat_rx_data, 0); 564 atomic_set(&rxnet->stat_rx_data_reqack, 0); 565 atomic_set(&rxnet->stat_rx_data_jumbo, 0); 566 567 atomic_set(&rxnet->stat_tx_ack_fill, 0); 568 atomic_set(&rxnet->stat_tx_ack_send, 0); 569 atomic_set(&rxnet->stat_tx_ack_skip, 0); 570 memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks)); 571 memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks)); 572 573 memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack)); 574 575 atomic_set(&rxnet->stat_io_loop, 0); 576 return size; 577 } 578