// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#include <net/hotdata.h>

#include "dev.h"

static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
        unsigned long ifindex = *pos;
        struct net_device *dev;

        for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
                *pos = dev->ifindex;
                return dev;
        }
        return NULL;
}

static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        if (!*pos)
                return SEQ_START_TOKEN;

        return dev_seq_from_index(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return dev_seq_from_index(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

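/* Emit one line of /proc/net/dev for @dev.  Column order matches the header
 * printed by dev_seq_show(); several columns fold related counters together
 * (e.g. "drop" adds in rx_missed_errors, "frame" sums the length/over/crc/
 * frame error counters).
 */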
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

        seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
                   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
                   dev->name, stats->rx_bytes, stats->rx_packets,
                   stats->rx_errors,
                   stats->rx_dropped + stats->rx_missed_errors,
                   stats->rx_fifo_errors,
                   stats->rx_length_errors + stats->rx_over_errors +
                    stats->rx_crc_errors + stats->rx_frame_errors,
                   stats->rx_compressed, stats->multicast,
                   stats->tx_bytes, stats->tx_packets,
                   stats->tx_errors, stats->tx_dropped,
                   stats->tx_fifo_errors, stats->collisions,
                   stats->tx_carrier_errors +
                    stats->tx_aborted_errors +
                    stats->tx_window_errors +
                    stats->tx_heartbeat_errors,
                   stats->tx_compressed);
}

/*
 * Called from the PROCfs module. This now uses the new arbitrary-sized
 * /proc/net interface to create /proc/net/dev.
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Inter-|   Receive                            "
                              "                    |  Transmit\n"
                              " face |bytes    packets errs drop fifo frame "
                              "compressed multicast|bytes    packets errs "
                              "drop fifo colls carrier compressed\n");
        else
                dev_seq_printf_stats(seq, v);
        return 0;
}

static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
        return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
        return skb_queue_len_lockless(&sd->process_queue);
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
        struct softnet_data *sd = NULL;

        while (*pos < nr_cpu_ids)
                if (cpu_online(*pos)) {
                        sd = &per_cpu(softnet_data, *pos);
                        break;
                } else
                        ++*pos;
        return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
        return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

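/* One line of /proc/net/softnet_stat per online CPU.  Fields, all in hex:
 * processed, dropped, time_squeeze, a constant zero, four zeros that were
 * once fastroute stats, a zero that was cpu_collision, received_rps,
 * flow_limit_count, total backlog length (input_pkt_queue + process_queue),
 * CPU index, input_pkt_queue length and process_queue length.
 */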
static int softnet_seq_show(struct seq_file *seq, void *v)
{
        struct softnet_data *sd = v;
        u32 input_qlen = softnet_input_pkt_queue_len(sd);
        u32 process_qlen = softnet_process_queue_len(sd);
        unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
        struct sd_flow_limit *fl;

        rcu_read_lock();
        fl = rcu_dereference(sd->flow_limit);
        if (fl)
                flow_limit_count = fl->count;
        rcu_read_unlock();
#endif

        /* The index is the CPU id owning this sd.  Since offline CPUs are
         * not displayed, it would otherwise not be trivial for user space
         * to map the data to a specific CPU.
         */
        seq_printf(seq,
                   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
                   "%08x %08x\n",
                   sd->processed, atomic_read(&sd->dropped),
                   sd->time_squeeze, 0,
                   0, 0, 0, 0, /* was fastroute */
                   0,          /* was cpu_collision */
                   sd->received_rps, flow_limit_count,
                   input_qlen + process_qlen, (int)seq->index,
                   input_qlen, process_qlen);
        return 0;
}

static const struct seq_operations dev_seq_ops = {
        .start = dev_seq_start,
        .next = dev_seq_next,
        .stop = dev_seq_stop,
        .show = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
        .start = softnet_seq_start,
        .next = softnet_seq_next,
        .stop = softnet_seq_stop,
        .show = softnet_seq_show,
};

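/* Walk every registered packet_type in the order /proc/net/ptype reports
 * them: per-device ptype_all lists first, then the per-netns ptype_all and
 * ptype_specific lists, and finally the global ptype_base hash.  @pos is a
 * flat index into that concatenated sequence.
 */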
static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
        struct list_head *ptype_list = NULL;
        struct packet_type *pt = NULL;
        struct net_device *dev;
        loff_t i = 0;
        int t;

        for_each_netdev_rcu(seq_file_net(seq), dev) {
                ptype_list = &dev->ptype_all;
                list_for_each_entry_rcu(pt, ptype_list, list) {
                        if (i == pos)
                                return pt;
                        ++i;
                }
        }

        list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) {
                if (i == pos)
                        return pt;
                ++i;
        }

        list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_specific, list) {
                if (i == pos)
                        return pt;
                ++i;
        }

        for (t = 0; t < PTYPE_HASH_SIZE; t++) {
                list_for_each_entry_rcu(pt, &ptype_base[t], list) {
                        if (i == pos)
                                return pt;
                        ++i;
                }
        }
        return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        struct net_device *dev;
        struct packet_type *pt;
        struct list_head *nxt;
        int hash;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ptype_get_idx(seq, 0);

        pt = v;
        nxt = pt->list.next;
        if (pt->dev) {
                if (nxt != &pt->dev->ptype_all)
                        goto found;

                dev = pt->dev;
                for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
                        if (!list_empty(&dev->ptype_all)) {
                                nxt = dev->ptype_all.next;
                                goto found;
                        }
                }

                nxt = net->ptype_all.next;
                goto net_ptype_all;
        }

        if (pt->af_packet_net) {
net_ptype_all:
                if (nxt != &net->ptype_all && nxt != &net->ptype_specific)
                        goto found;

                if (nxt == &net->ptype_all) {
                        /* continue with ->ptype_specific if it's not empty */
                        nxt = net->ptype_specific.next;
                        if (nxt != &net->ptype_specific)
                                goto found;
                }

                hash = 0;
                nxt = ptype_base[0].next;
        } else
                hash = ntohs(pt->type) & PTYPE_HASH_MASK;

        while (nxt == &ptype_base[hash]) {
                if (++hash >= PTYPE_HASH_SIZE)
                        return NULL;
                nxt = ptype_base[hash].next;
        }
found:
        return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

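/* Only entries visible from the netns being read are printed: packet types
 * bound to a device or to a namespace are filtered with net_eq(), global
 * ones always show up.
 */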
static int ptype_seq_show(struct seq_file *seq, void *v)
{
        struct packet_type *pt = v;

        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "Type Device      Function\n");
        else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
                 (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
                if (pt->type == htons(ETH_P_ALL))
                        seq_puts(seq, "ALL ");
                else
                        seq_printf(seq, "%04x", ntohs(pt->type));

                seq_printf(seq, " %-8s %ps\n",
                           pt->dev ? pt->dev->name : "", pt->func);
        }

        return 0;
}

static const struct seq_operations ptype_seq_ops = {
        .start = ptype_seq_start,
        .next = ptype_seq_next,
        .stop = ptype_seq_stop,
        .show = ptype_seq_show,
};

static int __net_init dev_proc_net_init(struct net *net)
{
        int rc = -ENOMEM;

        if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
                             sizeof(struct seq_net_private)))
                goto out;
        if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
                             &softnet_seq_ops))
                goto out_dev;
        if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
                             sizeof(struct seq_net_private)))
                goto out_softnet;

        if (wext_proc_init(net))
                goto out_ptype;
        rc = 0;
out:
        return rc;
out_ptype:
        remove_proc_entry("ptype", net->proc_net);
out_softnet:
        remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
        remove_proc_entry("dev", net->proc_net);
        goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
        wext_proc_exit(net);

        remove_proc_entry("ptype", net->proc_net);
        remove_proc_entry("softnet_stat", net->proc_net);
        remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
        .init = dev_proc_net_init,
        .exit = dev_proc_net_exit,
};

static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
        struct netdev_hw_addr *ha;
        struct net_device *dev = v;

        if (v == SEQ_START_TOKEN)
                return 0;

        netif_addr_lock_bh(dev);
        netdev_for_each_mc_addr(ha, dev) {
                seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
                           dev->ifindex, dev->name,
                           ha->refcount, ha->global_use,
                           (int)dev->addr_len, ha->addr);
        }
        netif_addr_unlock_bh(dev);
        return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
        .start = dev_seq_start,
        .next = dev_seq_next,
        .stop = dev_seq_stop,
        .show = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
        if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
                             sizeof(struct seq_net_private)))
                return -ENOMEM;
        return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
        remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
        .init = dev_mc_net_init,
        .exit = dev_mc_net_exit,
};

int __init dev_proc_init(void)
{
        int ret = register_pernet_subsys(&dev_proc_ops);

        if (!ret)
                return register_pernet_subsys(&dev_mc_net_ops);
        return ret;
}