xref: /linux/net/rxrpc/proc.c (revision c79c3c34f75d72a066e292b10aa50fc758c97c89)
// SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc/calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
	__acquires(rxnet->call_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	rcu_read_lock();
	read_lock(&rxnet->call_lock);
	return seq_list_start_head(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->call_lock)
	__releases(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->call_lock);
	rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

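	/* seq_list_start_head() hands us the list head itself on the first
	 * pass; use that pass to emit the column headings.
	 */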
	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " DebugId  TxSeq    TW RxSeq    RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

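	/* The RCU read lock taken in rxrpc_call_seq_start() is what makes
	 * this dereference of the call's socket back-pointer safe.
	 */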
	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

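	/* Report the time remaining, in jiffies, before the expect-Rx timer
	 * would fire; preallocated service calls don't have the timer armed.
	 */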
	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;
	}

	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
		   lbuff,
		   rbuff,
		   call->service_id,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   atomic_read(&call->usage),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->debug_id,
		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
		   call->rx_serial,
		   timeout);

	return 0;
}

const struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};

/*
 * generate a list of extant virtual connections in /proc/net/rxrpc/conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
				       loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Use State    Key     "
			 " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
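	/* A preallocated service connection hasn't been bound to a local
	 * endpoint or peer yet, so there are no addresses to format.
	 */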
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);

	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   atomic_read(&conn->usage),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};

/*
 * generate a list of extant peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use  CW   MTU LastUse      RTT      RTO\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
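	/* srtt_us is kept scaled up by 8 (as in TCP), so shift it down to
	 * get microseconds; rto_j is in jiffies, converted here to usecs.
	 */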
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %8u %8u\n",
		   lbuff,
		   rbuff,
		   atomic_read(&peer->usage),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->srtt_us >> 3,
		   jiffies_to_usecs(peer->rto_j));

	return 0;
}

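/* The iterator position packs the peer hash bucket into the upper bits of
 * *_pos and the index within that bucket (offset by one, with zero reserved
 * for the header line) into the lower bits.
 */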
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}

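/* Step to the next peer in the current hash bucket, moving on to subsequent
 * buckets once this one is exhausted.
 */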
static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	if (*_pos >= UINT_MAX)
		return NULL;

	bucket = *_pos >> shift;

	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
	if (p)
		return p;

	for (;;) {
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;

		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
	}
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}


const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};