// SPDX-License-Identifier: GPL-2.0
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/wext.h>
#include <net/hotdata.h>

#include "dev.h"

static void *dev_seq_from_index(struct seq_file *seq, loff_t *pos)
{
	unsigned long ifindex = *pos;
	struct net_device *dev;

	for_each_netdev_dump(seq_file_net(seq), dev, ifindex) {
		*pos = dev->ifindex;
		return dev;
	}
	return NULL;
}

static void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	return dev_seq_from_index(seq, pos);
}

static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_seq_from_index(seq, pos);
}

static void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

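/*
 * Illustrative /proc/net/dev data line as emitted by dev_seq_printf_stats()
 * above (interface name and counter values are made up, not taken from a
 * real system):
 *
 *   eth0: 1234567    8901    0    0    0     0          0         0  7654321    6789    0    0    0     0       0          0
 *
 * Receive columns: bytes packets errs drop fifo frame compressed multicast;
 * transmit columns: bytes packets errs drop fifo colls carrier compressed.
 * Note that "drop" also folds in rx_missed_errors, and "frame" aggregates
 * the length, over, CRC and frame error counters, as computed above.
 */
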
/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static u32 softnet_input_pkt_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->input_pkt_queue);
}

static u32 softnet_process_queue_len(struct softnet_data *sd)
{
	return skb_queue_len_lockless(&sd->process_queue);
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

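/*
 * One /proc/net/softnet_stat line is printed per online CPU by
 * softnet_seq_show() below. Column order (all hex): processed, dropped,
 * time_squeeze, six zero placeholders (historical fastroute/cpu_collision
 * fields), received_rps, flow_limit_count, combined backlog length
 * (input_pkt_queue + process_queue), CPU index, input_pkt_queue length and
 * process_queue length. An illustrative, made-up line:
 *
 *   000a3b10 00000000 00000012 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000002 00000000 00000000
 */
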
static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;
	u32 input_qlen = softnet_input_pkt_queue_len(sd);
	u32 process_qlen = softnet_process_queue_len(sd);
	unsigned int flow_limit_count = 0;

#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	/* Pairs with WRITE_ONCE() in skb_flow_limit() */
	if (fl)
		flow_limit_count = READ_ONCE(fl->count);
	rcu_read_unlock();
#endif

	/* The index is the id of the CPU owning this sd. Since offline CPUs
	 * are not displayed, it would otherwise not be trivial for user space
	 * to map the data to a specific CPU.
	 */
	seq_printf(seq,
		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x "
		   "%08x %08x\n",
		   READ_ONCE(sd->processed),
		   numa_drop_read(&sd->drop_counters),
		   READ_ONCE(sd->time_squeeze), 0,
		   0, 0, 0, 0, /* was fastroute */
		   0,	/* was cpu_collision */
		   READ_ONCE(sd->received_rps), flow_limit_count,
		   input_qlen + process_qlen, (int)seq->index,
		   input_qlen, process_qlen);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_seq_show,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next = softnet_seq_next,
	.stop = softnet_seq_stop,
	.show = softnet_seq_show,
};

struct ptype_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
};

static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ptype_iter_state *iter = seq->private;
	struct list_head *ptype_list = NULL;
	struct packet_type *pt = NULL;
	struct net_device *dev;
	loff_t i = 0;
	int t;

	for_each_netdev_rcu(seq_file_net(seq), dev) {
		ptype_list = &dev->ptype_all;
		list_for_each_entry_rcu(pt, ptype_list, list) {
			if (i == pos) {
				iter->dev = dev;
				return pt;
			}
			++i;
		}
	}

	iter->dev = NULL;

	list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_specific, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

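/*
 * Summary of the /proc/net/ptype walk implemented by ptype_get_idx() above
 * and ptype_seq_next() below: device-bound handlers on each netdev's
 * ptype_all list come first, followed by the per-netns ptype_all and
 * ptype_specific lists, and finally the global ptype_base hash table.
 * iter->dev records which device the current entry was found on (NULL once
 * the walk has moved past the per-device lists).
 */
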
static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ptype_iter_state *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(seq, 0);

	pt = v;
	nxt = READ_ONCE(pt->list.next);
	dev = iter->dev;
	if (dev) {
		if (nxt != &dev->ptype_all)
			goto found;

		for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
			nxt = READ_ONCE(dev->ptype_all.next);
			if (nxt != &dev->ptype_all) {
				iter->dev = dev;
				goto found;
			}
		}
		iter->dev = NULL;
		nxt = READ_ONCE(net->ptype_all.next);
		goto net_ptype_all;
	}

	if (pt->af_packet_net) {
net_ptype_all:
		if (nxt != &net->ptype_all && nxt != &net->ptype_specific)
			goto found;

		if (nxt == &net->ptype_all) {
			/* continue with ->ptype_specific if it's not empty */
			nxt = READ_ONCE(net->ptype_specific.next);
			if (nxt != &net->ptype_specific)
				goto found;
		}

		hash = 0;
		nxt = READ_ONCE(ptype_base[0].next);
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = READ_ONCE(ptype_base[hash].next);
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

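/*
 * Each /proc/net/ptype entry printed by ptype_seq_show() below consists of
 * the protocol type in hex (or "ALL" for ETH_P_ALL handlers), the bound
 * device name if any, and the handler function. Illustrative, made-up
 * output (handler names are only examples):
 *
 *   Type Device      Function
 *   ALL  eth0     tpacket_rcv
 *   0800          ip_rcv
 *
 * Entries owned by another network namespace, or bound to a device in
 * another namespace, are filtered out.
 */
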
static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct ptype_iter_state *iter = seq->private;
	struct packet_type *pt = v;
	struct net_device *dev;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Type Device      Function\n");
		return 0;
	}
	dev = iter->dev;
	if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
	    (!dev || net_eq(dev_net(dev), seq_file_net(seq)))) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %ps\n",
			   dev ? dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next = ptype_seq_next,
	.stop = ptype_seq_stop,
	.show = ptype_seq_show,
};

static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops,
			     sizeof(struct seq_net_private)))
		goto out;
	if (!proc_create_seq("softnet_stat", 0444, net->proc_net,
			     &softnet_seq_ops))
		goto out_dev;
	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
			     sizeof(struct ptype_iter_state)))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	remove_proc_entry("ptype", net->proc_net);
out_softnet:
	remove_proc_entry("softnet_stat", net->proc_net);
out_dev:
	remove_proc_entry("dev", net->proc_net);
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	remove_proc_entry("ptype", net->proc_net);
	remove_proc_entry("softnet_stat", net->proc_net);
	remove_proc_entry("dev", net->proc_net);
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int dev_mc_seq_show(struct seq_file *seq, void *v)
{
	struct netdev_hw_addr *ha;
	struct net_device *dev = v;

	if (v == SEQ_START_TOKEN)
		return 0;

	netif_addr_lock_bh(dev);
	netdev_for_each_mc_addr(ha, dev) {
		seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n",
			   dev->ifindex, dev->name,
			   ha->refcount, ha->global_use,
			   (int)dev->addr_len, ha->addr);
	}
	netif_addr_unlock_bh(dev);
	return 0;
}

static const struct seq_operations dev_mc_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = dev_mc_seq_show,
};

static int __net_init dev_mc_net_init(struct net *net)
{
	if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;
	return 0;
}

static void __net_exit dev_mc_net_exit(struct net *net)
{
	remove_proc_entry("dev_mcast", net->proc_net);
}

static struct pernet_operations __net_initdata dev_mc_net_ops = {
	.init = dev_mc_net_init,
	.exit = dev_mc_net_exit,
};

int __init dev_proc_init(void)
{
	int ret = register_pernet_subsys(&dev_proc_ops);

	if (!ret)
		return register_pernet_subsys(&dev_mc_net_ops);
	return ret;
}