xref: /linux/net/sunrpc/stats.c (revision daa2be74b1b2302004945b2a5e32424e177cc7da)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/net/sunrpc/stats.c
4  *
5  * procfs-based user access to generic RPC statistics. The stats files
6  * reside in /proc/net/rpc.
7  *
8  * The read routines assume that the buffer passed in is just big enough.
9  * If you implement an RPC service that has its own stats routine which
10  * appends the generic RPC stats, make sure you don't exceed the PAGE_SIZE
11  * limit.
12  *
13  * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
14  */
15 
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 
19 #include <linux/init.h>
20 #include <linux/kernel.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/sunrpc/clnt.h>
24 #include <linux/sunrpc/svcsock.h>
25 #include <linux/sunrpc/metrics.h>
26 #include <linux/rcupdate.h>
27 
28 #include <trace/events/sunrpc.h>
29 
30 #include "netns.h"
31 
32 #define RPCDBG_FACILITY	RPCDBG_MISC
33 
34 /*
35  * Get RPC client stats
36  */
37 static int rpc_proc_show(struct seq_file *seq, void *v) {
38 	const struct rpc_stat	*statp = seq->private;
39 	const struct rpc_program *prog = statp->program;
40 	unsigned int i, j;
41 
42 	seq_printf(seq,
43 		"net %u %u %u %u\n",
44 			statp->netcnt,
45 			statp->netudpcnt,
46 			statp->nettcpcnt,
47 			statp->nettcpconn);
48 	seq_printf(seq,
49 		"rpc %u %u %u\n",
50 			statp->rpccnt,
51 			statp->rpcretrans,
52 			statp->rpcauthrefresh);
53 
54 	for (i = 0; i < prog->nrvers; i++) {
55 		const struct rpc_version *vers = prog->version[i];
56 		if (!vers)
57 			continue;
58 		seq_printf(seq, "proc%u %u",
59 					vers->number, vers->nrprocs);
60 		for (j = 0; j < vers->nrprocs; j++)
61 			seq_printf(seq, " %u", vers->counts[j]);
62 		seq_putc(seq, '\n');
63 	}
64 	return 0;
65 }
66 
67 static int rpc_proc_open(struct inode *inode, struct file *file)
68 {
69 	return single_open(file, rpc_proc_show, pde_data(inode));
70 }
71 
/* proc_ops for /proc/net/rpc/<program>: standard single-shot seq_file. */
static const struct proc_ops rpc_proc_ops = {
	.proc_open	= rpc_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};
78 
79 /*
80  * Get RPC server stats
81  */
82 void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
83 {
84 	const struct svc_program *prog = statp->program;
85 	const struct svc_version *vers;
86 	unsigned int i, j, k;
87 	unsigned long count;
88 
89 	seq_printf(seq,
90 		"net %u %u %u %u\n",
91 			statp->netcnt,
92 			statp->netudpcnt,
93 			statp->nettcpcnt,
94 			statp->nettcpconn);
95 	seq_printf(seq,
96 		"rpc %u %u %u %u %u\n",
97 			statp->rpccnt,
98 			statp->rpcbadfmt+statp->rpcbadauth+statp->rpcbadclnt,
99 			statp->rpcbadfmt,
100 			statp->rpcbadauth,
101 			statp->rpcbadclnt);
102 
103 	for (i = 0; i < prog->pg_nvers; i++) {
104 		vers = prog->pg_vers[i];
105 		if (!vers)
106 			continue;
107 		seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
108 		for (j = 0; j < vers->vs_nproc; j++) {
109 			count = 0;
110 			for_each_possible_cpu(k)
111 				count += per_cpu(vers->vs_count[j], k);
112 			seq_printf(seq, " %lu", count);
113 		}
114 		seq_putc(seq, '\n');
115 	}
116 }
117 EXPORT_SYMBOL_GPL(svc_seq_show);
118 
119 /**
120  * rpc_alloc_iostats - allocate an rpc_iostats structure
121  * @clnt: RPC program, version, and xprt
122  *
123  */
124 struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
125 {
126 	struct rpc_iostats *stats;
127 	int i;
128 
129 	stats = kcalloc(clnt->cl_maxproc, sizeof(*stats), GFP_KERNEL);
130 	if (stats) {
131 		for (i = 0; i < clnt->cl_maxproc; i++)
132 			spin_lock_init(&stats[i].om_lock);
133 	}
134 	return stats;
135 }
136 EXPORT_SYMBOL_GPL(rpc_alloc_iostats);
137 
/**
 * rpc_free_iostats - release an rpc_iostats structure
 * @stats: doomed rpc_iostats structure
 *
 * Counterpart to rpc_alloc_iostats(); accepts NULL (kfree is a no-op).
 */
void rpc_free_iostats(struct rpc_iostats *stats)
{
	kfree(stats);
}
EXPORT_SYMBOL_GPL(rpc_free_iostats);
148 
/**
 * rpc_count_iostats_metrics - tally up per-task stats
 * @task: completed rpc_task
 * @op_metrics: stat structure for OP that will accumulate stats from @task
 *
 * Accumulates the finished task's counters and latencies into
 * @op_metrics under om_lock, then emits an rpc_stats_latency tracepoint
 * (outside the lock).
 */
void rpc_count_iostats_metrics(const struct rpc_task *task,
			       struct rpc_iostats *op_metrics)
{
	struct rpc_rqst *req = task->tk_rqstp;
	ktime_t backlog, execute, now;

	/* Nothing to account without a metrics bucket or a request. */
	if (!op_metrics || !req)
		return;

	now = ktime_get();
	spin_lock(&op_metrics->om_lock);

	op_metrics->om_ops++;
	/* kernel API: om_ops must never become larger than om_ntrans */
	op_metrics->om_ntrans += max(req->rq_ntrans, 1);
	op_metrics->om_timeouts += task->tk_timeouts;

	op_metrics->om_bytes_sent += req->rq_xmit_bytes_sent;
	op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;

	/* Backlog (queue) time: task start to first transmit; rq_xtime
	 * of zero means the request was never transmitted. */
	backlog = 0;
	if (ktime_to_ns(req->rq_xtime)) {
		backlog = ktime_sub(req->rq_xtime, task->tk_start);
		op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);
	}

	op_metrics->om_rtt = ktime_add(op_metrics->om_rtt, req->rq_rtt);

	/* Total execution time: task start until now (completion). */
	execute = ktime_sub(now, task->tk_start);
	op_metrics->om_execute = ktime_add(op_metrics->om_execute, execute);
	if (task->tk_status < 0)
		op_metrics->om_error_status++;

	spin_unlock(&op_metrics->om_lock);

	trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);
}
EXPORT_SYMBOL_GPL(rpc_count_iostats_metrics);
192 
193 /**
194  * rpc_count_iostats - tally up per-task stats
195  * @task: completed rpc_task
196  * @stats: array of stat structures
197  *
198  * Uses the statidx from @task
199  */
200 void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats)
201 {
202 	rpc_count_iostats_metrics(task,
203 				  &stats[task->tk_msg.rpc_proc->p_statidx]);
204 }
205 EXPORT_SYMBOL_GPL(rpc_count_iostats);
206 
207 static void _print_name(struct seq_file *seq, unsigned int op,
208 			const struct rpc_procinfo *procs)
209 {
210 	if (procs[op].p_name)
211 		seq_printf(seq, "\t%12s: ", procs[op].p_name);
212 	else if (op == 0)
213 		seq_printf(seq, "\t        NULL: ");
214 	else
215 		seq_printf(seq, "\t%12u: ", op);
216 }
217 
218 static void _add_rpc_iostats(struct rpc_iostats *a, struct rpc_iostats *b)
219 {
220 	a->om_ops += b->om_ops;
221 	a->om_ntrans += b->om_ntrans;
222 	a->om_timeouts += b->om_timeouts;
223 	a->om_bytes_sent += b->om_bytes_sent;
224 	a->om_bytes_recv += b->om_bytes_recv;
225 	a->om_queue = ktime_add(a->om_queue, b->om_queue);
226 	a->om_rtt = ktime_add(a->om_rtt, b->om_rtt);
227 	a->om_execute = ktime_add(a->om_execute, b->om_execute);
228 	a->om_error_status += b->om_error_status;
229 }
230 
231 static void _print_rpc_iostats(struct seq_file *seq, struct rpc_iostats *stats,
232 			       int op, const struct rpc_procinfo *procs)
233 {
234 	_print_name(seq, op, procs);
235 	seq_printf(seq, "%lu %lu %lu %llu %llu %llu %llu %llu %lu\n",
236 		   stats->om_ops,
237 		   stats->om_ntrans,
238 		   stats->om_timeouts,
239 		   stats->om_bytes_sent,
240 		   stats->om_bytes_recv,
241 		   ktime_to_ms(stats->om_queue),
242 		   ktime_to_ms(stats->om_rtt),
243 		   ktime_to_ms(stats->om_execute),
244 		   stats->om_error_status);
245 }
246 
247 static int do_print_stats(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *seqv)
248 {
249 	struct seq_file *seq = seqv;
250 
251 	xprt->ops->print_stats(xprt, seq);
252 	return 0;
253 }
254 
255 void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt)
256 {
257 	unsigned int op, maxproc = clnt->cl_maxproc;
258 
259 	if (!clnt->cl_metrics)
260 		return;
261 
262 	seq_printf(seq, "\tRPC iostats version: %s  ", RPC_IOSTATS_VERS);
263 	seq_printf(seq, "p/v: %u/%u (%s)\n",
264 			clnt->cl_prog, clnt->cl_vers, clnt->cl_program->name);
265 
266 	rpc_clnt_iterate_for_each_xprt(clnt, do_print_stats, seq);
267 
268 	seq_printf(seq, "\tper-op statistics\n");
269 	for (op = 0; op < maxproc; op++) {
270 		struct rpc_iostats stats = {};
271 		struct rpc_clnt *next = clnt;
272 		do {
273 			_add_rpc_iostats(&stats, &next->cl_metrics[op]);
274 			if (next == next->cl_parent)
275 				break;
276 			next = next->cl_parent;
277 		} while (next);
278 		_print_rpc_iostats(seq, &stats, op, clnt->cl_procinfo);
279 	}
280 }
281 EXPORT_SYMBOL_GPL(rpc_clnt_show_stats);
282 
283 /*
284  * Register/unregister RPC proc files
285  */
286 static inline struct proc_dir_entry *
287 do_register(struct net *net, const char *name, void *data,
288 	    const struct proc_ops *proc_ops)
289 {
290 	struct sunrpc_net *sn;
291 
292 	dprintk("RPC:       registering /proc/net/rpc/%s\n", name);
293 	sn = net_generic(net, sunrpc_net_id);
294 	return proc_create_data(name, 0, sn->proc_net_rpc, proc_ops, data);
295 }
296 
297 struct proc_dir_entry *
298 rpc_proc_register(struct net *net, struct rpc_stat *statp)
299 {
300 	return do_register(net, statp->program->name, statp, &rpc_proc_ops);
301 }
302 EXPORT_SYMBOL_GPL(rpc_proc_register);
303 
304 void
305 rpc_proc_unregister(struct net *net, const char *name)
306 {
307 	struct sunrpc_net *sn;
308 
309 	sn = net_generic(net, sunrpc_net_id);
310 	remove_proc_entry(name, sn->proc_net_rpc);
311 }
312 EXPORT_SYMBOL_GPL(rpc_proc_unregister);
313 
struct proc_dir_entry *
svc_proc_register(struct net *net, struct svc_stat *statp, const struct proc_ops *proc_ops)
{
	/* NOTE: the entry's private data is @net, not @statp — the
	 * caller-supplied proc_ops presumably derive per-netns stats
	 * from it; @statp only supplies the file name here. Confirm
	 * against the proc_ops implementations before changing. */
	return do_register(net, statp->program->pg_name, net, proc_ops);
}
EXPORT_SYMBOL_GPL(svc_proc_register);
320 
321 void
322 svc_proc_unregister(struct net *net, const char *name)
323 {
324 	struct sunrpc_net *sn;
325 
326 	sn = net_generic(net, sunrpc_net_id);
327 	remove_proc_entry(name, sn->proc_net_rpc);
328 }
329 EXPORT_SYMBOL_GPL(svc_proc_unregister);
330 
331 int rpc_proc_init(struct net *net)
332 {
333 	struct sunrpc_net *sn;
334 
335 	dprintk("RPC:       registering /proc/net/rpc\n");
336 	sn = net_generic(net, sunrpc_net_id);
337 	sn->proc_net_rpc = proc_mkdir("rpc", net->proc_net);
338 	if (sn->proc_net_rpc == NULL)
339 		return -ENOMEM;
340 
341 	return 0;
342 }
343 
/* Tear down this netns's /proc/net/rpc directory. */
void rpc_proc_exit(struct net *net)
{
	dprintk("RPC:       unregistering /proc/net/rpc\n");
	remove_proc_entry("rpc", net->proc_net);
}
349