xref: /linux/include/net/gen_stats.h (revision 52a9dab6d892763b2a8334a568bd4e2c1a6fde66)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __NET_GEN_STATS_H
3 #define __NET_GEN_STATS_H
4 
5 #include <linux/gen_stats.h>
6 #include <linux/socket.h>
7 #include <linux/rtnetlink.h>
8 #include <linux/pkt_sched.h>
9 
10 /* Throughput stats.
11  * Must be initialized beforehand with gnet_stats_basic_sync_init().
12  *
13  * If no reads can ever occur parallel to writes (e.g. stack-allocated
14  * bstats), then the internal stat values can be written to and read
15  * from directly. Otherwise, use _bstats_set/update() for writes and
16  * gnet_stats_add_basic() for reads.
17  */
18 struct gnet_stats_basic_sync {
19 	u64_stats_t bytes;	/* total bytes seen */
20 	u64_stats_t packets;	/* total packets seen */
21 	struct u64_stats_sync syncp;	/* write seqcount so 32-bit readers see consistent 64-bit counters */
22 } __aligned(2 * sizeof(u64));	/* keep bytes/packets in one cacheline-friendly, naturally aligned pair */
23 
24 struct net_rate_estimator;
25 
/* State carried across one stats dump: set up by gnet_stats_start_copy*(),
 * filled by the gnet_stats_copy_*() helpers, flushed by
 * gnet_stats_finish_copy().
 */
26 struct gnet_dump {
27 	spinlock_t *      lock;	/* lock taken for the duration of the dump; NOTE(review): presumably may be NULL for lockless callers — confirm in gen_stats.c */
28 	struct sk_buff *  skb;	/* netlink message being filled */
29 	struct nlattr *   tail;	/* attribute currently under construction */
30 
31 	/* Backward compatibility: when non-zero, stats are additionally
32 	 * emitted in the legacy struct tc_stats layout under these
33 	 * attribute types.
34 	 */
35 	int               compat_tc_stats;	/* attr type for legacy tc_stats, 0 = disabled */
36 	int               compat_xstats;	/* attr type for legacy xstats, 0 = disabled */
37 	int               padattr;	/* pad attribute type for 64-bit alignment */
38 	void *            xstats;	/* deferred xstats payload for the compat path */
39 	int               xstats_len;	/* length of xstats in bytes */
40 	struct tc_stats   tc_stats;	/* accumulated legacy stats image */
41 };
39 
/* Initialize a sync bstats instance (see the struct's comment above the
 * definition in this header: required before any reads can race writes).
 */
40 void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b);
/* Begin a stats dump into @skb under attribute @type; fills in @d. */
41 int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
42 			  struct gnet_dump *d, int padattr);
43 
/* As gnet_stats_start_copy(), but also arranges the legacy tc_stats/xstats
 * compat output under @tc_stats_type/@xstats_type (see struct gnet_dump).
 */
44 int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
45 				 int tc_stats_type, int xstats_type,
46 				 spinlock_t *lock, struct gnet_dump *d,
47 				 int padattr);
48 
/* Copy/accumulate basic (bytes/packets) stats.  Each helper takes either a
 * per-CPU set (@cpu) or a single shared instance (@b); NOTE(review):
 * presumably @cpu takes precedence when both are given — confirm against
 * net/core/gen_stats.c.  @running selects whether the u64_stats seqcount
 * must be honoured for reads.
 */
49 int gnet_stats_copy_basic(struct gnet_dump *d,
50 			  struct gnet_stats_basic_sync __percpu *cpu,
51 			  struct gnet_stats_basic_sync *b, bool running);
/* Sum basic stats into @bstats instead of emitting them to a dump. */
52 void gnet_stats_add_basic(struct gnet_stats_basic_sync *bstats,
53 			  struct gnet_stats_basic_sync __percpu *cpu,
54 			  struct gnet_stats_basic_sync *b, bool running);
/* Hardware-offload variant of gnet_stats_copy_basic(). */
55 int gnet_stats_copy_basic_hw(struct gnet_dump *d,
56 			     struct gnet_stats_basic_sync __percpu *cpu,
57 			     struct gnet_stats_basic_sync *b, bool running);
/* Emit the current rate-estimator sample, if an estimator is attached. */
58 int gnet_stats_copy_rate_est(struct gnet_dump *d,
59 			     struct net_rate_estimator __rcu **ptr);
/* Emit queue stats; @qlen overrides the queue-length field. */
60 int gnet_stats_copy_queue(struct gnet_dump *d,
61 			  struct gnet_stats_queue __percpu *cpu_q,
62 			  struct gnet_stats_queue *q, __u32 qlen);
/* Sum queue stats from per-CPU and/or shared sources into @qstats. */
63 void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
64 			  const struct gnet_stats_queue __percpu *cpu_q,
65 			  const struct gnet_stats_queue *q);
/* Emit @len bytes of caller-specific (application) stats from @st. */
66 int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
67 
/* Finalize the dump started by gnet_stats_start_copy*(). */
68 int gnet_stats_finish_copy(struct gnet_dump *d);
69 
/* Rate estimator lifecycle.  @bstats/@cpu_bstats are the counters the
 * estimator samples; @rate_est is published via RCU; @lock, when set,
 * serializes against writers; @opt carries the netlink estimator config
 * (struct tc_estimator).  @running has the same meaning as for the
 * gnet_stats_copy_basic() family above.
 */
70 int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
71 		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
72 		      struct net_rate_estimator __rcu **rate_est,
73 		      spinlock_t *lock,
74 		      bool running, struct nlattr *opt);
/* Detach and free the estimator behind @ptr (RCU-safe). */
75 void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
/* Atomically replace an existing estimator; same contract as
 * gen_new_estimator().
 */
76 int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
77 			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
78 			  struct net_rate_estimator __rcu **ptr,
79 			  spinlock_t *lock,
80 			  bool running, struct nlattr *opt);
/* Test whether an estimator is currently attached behind @ptr. */
81 bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
/* Read the current estimate into @sample; returns false if none attached. */
82 bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
83 			struct gnet_stats_rate_est64 *sample);
84 #endif
85