xref: /linux/net/core/net-procfs.c (revision 3f0a50f345f78183f6e9b39c2f45ca5dcaa511ca)
// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>

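/*
 * The *pos cookie used for the /proc/net/dev walk packs two values: the
 * upper bits select a bucket of net->dev_index_head, the low BUCKET_SPACE
 * bits hold a 1-based offset inside that bucket.  Offset 0 is never used
 * for a device, so *pos == 0 is free to stand for SEQ_START_TOKEN (the
 * header line).
 */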
#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

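/*
 * Return the device at the (1-based) offset encoded in *pos within the
 * current hash bucket, or NULL if the bucket has fewer entries.
 */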
static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_head *h;
	unsigned int count = 0, offset = get_offset(*pos);

	h = &net->dev_index_head[get_bucket(*pos)];
	hlist_for_each_entry_rcu(dev, h, index_hlist) {
		if (++count == offset)
			return dev;
	}

	return NULL;
}

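/*
 * Find the entry *pos points at, advancing *pos to the first slot of the
 * next hash bucket whenever the current one is exhausted.  Returns NULL
 * once every bucket has been scanned.
 */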
static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	unsigned int bucket;

	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev)
			return dev;

		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
		return NULL;

	return dev_from_bucket(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_from_bucket(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

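/*
 * Emit one /proc/net/dev line.  Several low-level counters are folded into
 * the legacy columns: missed frames are added to "drop", length/over/crc/
 * frame errors together form "frame", and carrier/aborted/window/heartbeat
 * errors together form "carrier".
 */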
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrarily
 *	sized /proc/net interface to create /proc/net/dev.
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

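/*
 * Backlog of one CPU: packets still queued for processing plus packets
 * currently being processed, both read without taking the queue locks.
 */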
static u32 softnet_backlog_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue) +
	       skb_queue_len_lockless(&sd->process_queue);
}

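/*
 * Advance *pos to the next online CPU (if needed) and return its
 * softnet_data, or NULL when no online CPU is left.  Note that *pos is the
 * CPU id itself, not a dense index.
 */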
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

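/*
 * One line of /proc/net/softnet_stat: processed, dropped, time_squeeze,
 * six always-zero fields kept for layout compatibility (several of them
 * once held the fastroute and cpu_collision counters), received_rps,
 * flow_limit_count, the backlog length and the owning CPU id, all printed
 * as hex words.
 */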
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl)
		flow_limit_count = fl->count;
	rcu_read_unlock();
#endif

	/* the index is the CPU id owning this sd. Since offline CPUs are not
	 * displayed, it would otherwise not be trivial for user space to map
	 * the data to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   0,	/* was cpu_collision */
		   sd->received_rps, flow_limit_count,
		   softnet_backlog_len(sd), (int)seq->index);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

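/*
 * Return the pos-th packet_type of the walk: per-device ptype_all lists
 * first, then the global ptype_all list, then the ptype_base hash table.
 */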
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos)
				return pt;
			++i;
		}
	}

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

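/*
 * Step to the entry after @v, following the same order as ptype_get_idx():
 * the current device's ptype_all list, the remaining devices' lists, the
 * global ptype_all list, and finally the ptype_base hash chains.
 */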
static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = pt->list.next;
	if (pt->dev) {
		if (nxt != &pt->dev->ptype_all)
			goto found;

		dev = pt->dev;
		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			if (!list_empty(&dev->ptype_all)) {
				nxt = dev->ptype_all.next;
				goto found;
			}
		}

		nxt = ptype_all.next;
		goto ptype_all;
	}

	if (pt->type == htons(ETH_P_ALL)) {
ptype_all:
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
		 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

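/*
 * Create the per-namespace proc entries (dev, softnet_stat, ptype and the
 * wireless-extensions files), unwinding the ones already registered on
 * failure.
 */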
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			 &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			sizeof(struct seq_net_private)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

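/*
 * One /proc/net/dev_mcast line per multicast address: ifindex, device
 * name, reference count, global use count and the address in hex.
 */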
static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

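/*
 * Hook both pernet subsystems up at boot; their ->init() handlers then
 * create the proc entries in every network namespace, current and future.
 */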
int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);

	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);
	return ret;
}