// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Authors:
 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
 *                             Uppsala University and
 *                             Swedish University of Agricultural Sciences
 *
 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
 * Ben Greear <greearb@candelatech.com>
 * Jens Låås <jens.laas@data.slu.se>
 *
 * A tool for loading the network with preconfigured packets.
 * The tool is implemented as a Linux module. Parameters are output
 * device, delay (to hard_xmit), number of packets, and whether
 * to use multiple SKBs or just the same one.
 * pktgen uses the installed interface's output routine.
 *
 * Additional hacking by:
 *
 * Jens.Laas@data.slu.se
 * Improved by ANK. 010120.
 * Improved by ANK even more. 010212.
 * MAC address typo fixed. 010417 --ro
 * Integrated.  020301 --DaveM
 * Added multiskb option 020301 --DaveM
 * Scaling of results. 020417--sigurdur@linpro.no
 * Significant re-work of the module:
 *   * Convert to threaded model to more efficiently be able to transmit
 *     and receive on multiple interfaces at once.
 *   * Converted many counters to __u64 to allow longer runs.
 *   * Allow configuration of ranges, like min/max IP address, MACs,
 *     and UDP-ports, for both source and destination, and can
 *     set to use a random distribution or sequentially walk the range.
 *   * Can now change most values after starting.
 *   * Place a 12-byte packet in the UDP payload with magic number,
 *     sequence number, and timestamp.
 *   * Add receiver code that detects dropped pkts, re-ordered pkts, and
 *     latencies (with micro-second precision).
 *   * Add IOCTL interface to easily get counters & configuration.
 *   --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
 * skb modes. A clone_skb=0 mode for Ben's "ranges" work and a clone_skb != 0
 * mode as a "fastpath" with a configurable number of clones after each alloc.
 * clone_skb=0 means all packets are allocated; this also means ranges, time
 * stamps etc. can be used. clone_skb=100 means 1 malloc is followed by 100
 * clones.
 *
 * Also moved to /proc/net/pktgen/
 * --ro
 *
 * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
 * mistakes.  Also merged in DaveM's patch in the -pre6 patch.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.rst for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start. This thread checks
 * for running devices in the if_list and sends packets until count is 0.
 * The thread also checks thread->control, which is used for inter-process
 * communication; the controlling process "posts" operations to the threads
 * this way.
 * The if_list is RCU protected, and the if_lock remains to protect updating
 * of the if_list from "add_device", as it is invoked from userspace (via a
 * proc write).
 *
 * By design there should only be *one* "controlling" process. In practice
 * multiple concurrent write accesses give unpredictable results. Note that
 * each "write" to /proc produces a result code that should be read back by
 * the "writer". For practical use this should be no problem.
 *
 * Note: when adding devices to a specific CPU, it is a good idea to also
 * assign /proc/irq/XX/smp_affinity so the TX interrupts are bound to the
 * same CPU. --ro
 *
 * Fix refcount off by one if first packet fails, potential null deref,
 * memleak 030710- KJP
 *
 * First "ranges" functionality for ipv6 030726 --ro
 *
 * Included flow support. 030802 ANK.
 *
 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
 *
 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
 *
 * New xmit() return, do_div and misc clean up by Stephen Hemminger
 * <shemminger@osdl.org> 040923
 *
 * Randy Dunlap fixed u64 printk compiler warning
 *
 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
 *
 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 *
 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com>
 * 050103
 *
 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 *
 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
 *
 * Fixed src_mac command to set source mac of packet to value specified in
 * command by Adit Ranadive <adit.262@gmail.com>
 */
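/*
 * Quick usage sketch (illustrative; Documentation/networking/pktgen.rst is
 * the authoritative walk-through -- the thread and interface names below
 * assume a kpktgend_0 thread and an eth0 test device):
 *
 *	modprobe pktgen
 *	echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
 *	echo "count 100000"    > /proc/net/pktgen/eth0
 *	echo "dst 10.0.0.2"    > /proc/net/pktgen/eth0
 *	echo "start"           > /proc/net/pktgen/pgctrl
 */
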
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sys.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/wait.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/mmzone.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#ifdef CONFIG_XFRM
#include <net/xfrm.h>
#endif
#include <net/netns/generic.h>
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/div64.h>		/* do_div */

#define VERSION	"2.75"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
/* Max number of internet mix entries that can be specified in imix_weights. */
#define MAX_IMIX_ENTRIES 20
#define IMIX_PRECISION 100 /* Precision of IMIX distribution */
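/*
 * An IMIX distribution is configured as "size,weight" pairs; e.g. the
 * example from Documentation/networking/pktgen.rst (illustrative values
 * and device name):
 *
 *	echo "imix_weights 40,7 576,4 1500,1" > /proc/net/pktgen/eth0
 *
 * i.e. 40-byte packets with weight 7, 576-byte with weight 4 and 1500-byte
 * with weight 1, spread over an IMIX_PRECISION-slot lookup table (see
 * fill_imix_distribution() and get_imix_entries() below).
 */
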
#define func_enter() pr_debug("entering %s\n", __func__);

#define PKT_FLAGS							\
	pf(IPV6)		/* Interface in IPV6 Mode */		\
	pf(IPSRC_RND)		/* IP-Src Random */			\
	pf(IPDST_RND)		/* IP-Dst Random */			\
	pf(TXSIZE_RND)		/* Transmit size is random */		\
	pf(UDPSRC_RND)		/* UDP-Src Random */			\
	pf(UDPDST_RND)		/* UDP-Dst Random */			\
	pf(UDPCSUM)		/* Include UDP checksum */		\
	pf(NO_TIMESTAMP)	/* Don't timestamp packets (default TS) */ \
	pf(MPLS_RND)		/* Random MPLS labels */		\
	pf(QUEUE_MAP_RND)	/* queue map Random */			\
	pf(QUEUE_MAP_CPU)	/* queue map mirrors smp_processor_id() */ \
	pf(FLOW_SEQ)		/* Sequential flows */			\
	pf(IPSEC)		/* ipsec on for flows */		\
	pf(MACSRC_RND)		/* MAC-Src Random */			\
	pf(MACDST_RND)		/* MAC-Dst Random */			\
	pf(VID_RND)		/* Random VLAN ID */			\
	pf(SVID_RND)		/* Random SVLAN ID */			\
	pf(NODE)		/* Node memory alloc*/			\

#define pf(flag)		flag##_SHIFT,
enum pkt_flags {
	PKT_FLAGS
};
#undef pf

/* Device flag bits */
#define pf(flag)		static const __u32 F_##flag = (1<<flag##_SHIFT);
PKT_FLAGS
#undef pf

#define pf(flag)		__stringify(flag),
static char *pkt_flag_names[] = {
	PKT_FLAGS
};
#undef pf

#define NR_PKT_FLAGS		ARRAY_SIZE(pkt_flag_names)
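/*
 * The pf() X-macro above expands PKT_FLAGS three times so the three views
 * of every flag stay in sync automatically. For pf(IPV6), for example, the
 * expansions produce (sketch, not literal preprocessor output):
 *
 *	IPV6_SHIFT,					-- enum pkt_flags
 *	static const __u32 F_IPV6 = (1 << IPV6_SHIFT);	-- device flag bit
 *	"IPV6",						-- pkt_flag_names[]
 */
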
/* Thread control flag bits */
#define T_STOP        (1<<0)	/* Stop run */
#define T_RUN         (1<<1)	/* Start run */
#define T_REMDEVALL   (1<<2)	/* Remove all devs */
#define T_REMDEV      (1<<3)	/* Remove one dev */

/* Xmit modes */
#define M_START_XMIT		0	/* Default normal TX */
#define M_NETIF_RECEIVE		1	/* Inject packets into stack */
#define M_QUEUE_XMIT		2	/* Inject packet into qdisc */

/* If lock -- protects updating of if_list */
#define   if_lock(t)           mutex_lock(&(t->if_lock));
#define   if_unlock(t)           mutex_unlock(&(t->if_lock));

/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
#define PG_PROC_DIR "pktgen"
#define PGCTRL	    "pgctrl"

#define MAX_CFLOWS  65536

#define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4)
#define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)

struct imix_pkt {
	u64 size;
	u64 weight;
	u64 count_so_far;
};

struct flow_state {
	__be32 cur_daddr;
	int count;
#ifdef CONFIG_XFRM
	struct xfrm_state *x;
#endif
	__u32 flags;
};

/* flow flag bits */
#define F_INIT   (1<<0)		/* flow has been initialized */

struct pktgen_dev {
	/*
	 * Try to keep frequently and infrequently used vars separated.
	 */
	struct proc_dir_entry *entry;	/* proc file */
	struct pktgen_thread *pg_thread;/* the owner */
	struct list_head list;		/* chaining in the thread's run-queue */
	struct rcu_head	 rcu;		/* freed by RCU */

	int running;		/* if false, the test will stop */

	/* If min != max, then we will either do a linear iteration, or
	 * we will do a random selection from within the range.
	 */
	__u32 flags;
	int xmit_mode;
	int min_pkt_size;
	int max_pkt_size;
	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
	int nfrags;
	int removal_mark;	/* non-zero => the device is marked for
				 * removal by worker thread */

	struct page *page;
	u64 delay;		/* nano-seconds */

	__u64 count;		/* Default No packets to send */
	__u64 sofar;		/* How many pkts we've sent so far */
	__u64 tx_bytes;		/* How many bytes we've transmitted */
	__u64 errors;		/* Errors when trying to transmit */

	/* runtime counters relating to clone_skb */

	__u32 clone_count;
	int last_ok;		/* Was last skb sent?
				 * Or a failed transmit of some sort?
				 * This will keep sequence numbers in order
				 */
	ktime_t next_tx;
	ktime_t started_at;
	ktime_t stopped_at;
	u64	idle_acc;	/* nano-seconds */

	__u32 seq_num;

	int clone_skb;		/*
				 * Use multiple SKBs during packet gen.
				 * If this number is greater than 1, then
				 * that many copies of the same packet will be
				 * sent before a new packet is allocated.
				 * If you want to send 1024 identical packets
				 * before creating a new packet,
				 * set clone_skb to 1024.
				 */

	char dst_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char dst_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */

	struct in6_addr in6_saddr;
	struct in6_addr in6_daddr;
	struct in6_addr cur_in6_daddr;
	struct in6_addr cur_in6_saddr;
	/* For ranges */
	struct in6_addr min_in6_daddr;
	struct in6_addr max_in6_daddr;
	struct in6_addr min_in6_saddr;
	struct in6_addr max_in6_saddr;

	/* If we're doing ranges, random or incremental, then this
	 * defines the min/max for those ranges.
	 */
	__be32 saddr_min;	/* inclusive, source IP address */
	__be32 saddr_max;	/* exclusive, source IP address */
	__be32 daddr_min;	/* inclusive, dest IP address */
	__be32 daddr_max;	/* exclusive, dest IP address */

	__u16 udp_src_min;	/* inclusive, source UDP port */
	__u16 udp_src_max;	/* exclusive, source UDP port */
	__u16 udp_dst_min;	/* inclusive, dest UDP port */
	__u16 udp_dst_max;	/* exclusive, dest UDP port */

	/* DSCP + ECN */
	__u8 tos;            /* six MSB of (former) IPv4 TOS
				are for dscp codepoint */
	__u8 traffic_class;  /* ditto for the (former) Traffic Class in IPv6
				(see RFC 3260, sec. 4) */

	/* IMIX */
	unsigned int n_imix_entries;
	struct imix_pkt imix_entries[MAX_IMIX_ENTRIES];
	/* Maps 0-IMIX_PRECISION range to imix_entry based on probability */
	__u8 imix_distribution[IMIX_PRECISION];

	/* MPLS */
	unsigned int nr_labels;	/* Depth of stack, 0 = no MPLS */
	__be32 labels[MAX_MPLS_LABELS];

	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
	__u8  vlan_p;
	__u8  vlan_cfi;
	__u16 vlan_id;  /* 0xffff means no vlan tag */

	__u8  svlan_p;
	__u8  svlan_cfi;
	__u16 svlan_id; /* 0xffff means no svlan tag */

	__u32 src_mac_count;	/* How many MACs to iterate through */
	__u32 dst_mac_count;	/* How many MACs to iterate through */

	unsigned char dst_mac[ETH_ALEN];
	unsigned char src_mac[ETH_ALEN];

	__u32 cur_dst_mac_offset;
	__u32 cur_src_mac_offset;
	__be32 cur_saddr;
	__be32 cur_daddr;
	__u16 ip_id;
	__u16 cur_udp_dst;
	__u16 cur_udp_src;
	__u16 cur_queue_map;
	__u32 cur_pkt_size;
	__u32 last_pkt_size;

	__u8 hh[14];
	/* = {
	   0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,

	   We fill in SRC address later
	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	   0x08, 0x00
	   };
	 */
	__u16 pad;		/* pad out the hh struct to an even 16 bytes */

	struct sk_buff *skb;	/* skb we are to transmit next, used for when we
				 * are transmitting the same one multiple times
				 */
	struct net_device *odev; /* The out-going device.
				  * Note that the device should have its
				  * pg_info pointer pointing back to this
				  * device.
				  * Set when the user specifies the out-going
				  * device name (not when the inject is
				  * started as it used to do.)
				  */
	netdevice_tracker dev_tracker;
	char odevname[32];
	struct flow_state *flows;
	unsigned int cflows;	/* Concurrent flows (config) */
	unsigned int lflow;	/* Flow length (config) */
	unsigned int nflows;	/* accumulated flows (stats) */
	unsigned int curfl;	/* current sequenced flow (state) */

	u16 queue_map_min;
	u16 queue_map_max;
	__u32 skb_priority;	/* skb priority field */
	unsigned int burst;	/* number of duplicated packets to burst */
	int node;		/* Memory node */

#ifdef CONFIG_XFRM
	__u8	ipsmode;	/* IPSEC mode (config) */
	__u8	ipsproto;	/* IPSEC type (config) */
	__u32	spi;
	struct xfrm_dst xdst;
	struct dst_ops dstops;
#endif
	char result[512];
};

struct pktgen_hdr {
	__be32 pgh_magic;
	__be32 seq_num;
	__be32 tv_sec;
	__be32 tv_usec;
};


static unsigned int pg_net_id __read_mostly;

struct pktgen_net {
	struct net		*net;
	struct proc_dir_entry	*proc_dir;
	struct list_head	pktgen_threads;
	bool			pktgen_exiting;
};

struct pktgen_thread {
	struct mutex if_lock;		/* for list of devices */
	struct list_head if_list;	/* All devices here */
	struct list_head th_list;
	struct task_struct *tsk;
	char result[512];

	/* Field for the thread to receive "posted" events: terminate,
	 * stop ifs, etc.
	 */

	u32 control;
	int cpu;

	wait_queue_head_t queue;
	struct completion start_done;
	struct pktgen_net *net;
};

#define REMOVE 1
#define FIND   0

static const char version[] =
	"Packet Generator for packet performance testing. "
	"Version: " VERSION "\n";

static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname, bool exact);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(struct pktgen_net *pn);
static void pktgen_reset_all_threads(struct pktgen_net *pn);
static void pktgen_stop_all_threads(struct pktgen_net *pn);

static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
static void fill_imix_distribution(struct pktgen_dev *pkt_dev);

/* Module parameters, defaults. */
static int pg_count_d __read_mostly = 1000;
static int pg_delay_d __read_mostly;
static int pg_clone_skb_d __read_mostly;
static int debug __read_mostly;

static DEFINE_MUTEX(pktgen_thread_lock);

static struct notifier_block pktgen_notifier_block = {
	.notifier_call = pktgen_device_event,
};

/*
 * /proc handling functions
 *
 */

static int pgctrl_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, version);
	return 0;
}

static ssize_t pgctrl_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	char data[128];
	struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (count == 0)
		return -EINVAL;

	if (count > sizeof(data))
		count = sizeof(data);

	if (copy_from_user(data, buf, count))
		return -EFAULT;

	data[count - 1] = 0;	/* Strip trailing '\n' and terminate string */

	if (!strcmp(data, "stop"))
		pktgen_stop_all_threads(pn);
	else if (!strcmp(data, "start"))
		pktgen_run_all_threads(pn);
	else if (!strcmp(data, "reset"))
		pktgen_reset_all_threads(pn);
	else
		return -EINVAL;

	return count;
}

static int pgctrl_open(struct inode *inode, struct file *file)
{
	return single_open(file, pgctrl_show, pde_data(inode));
}

static const struct proc_ops pktgen_proc_ops = {
	.proc_open	= pgctrl_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= pgctrl_write,
	.proc_release	= single_release,
};

static int pktgen_if_show(struct seq_file *seq, void *v)
{
	const struct pktgen_dev *pkt_dev = seq->private;
	ktime_t stopped;
	unsigned int i;
	u64 idle;

	seq_printf(seq,
		   "Params: count %llu  min_pkt_size: %u  max_pkt_size: %u\n",
		   (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size,
		   pkt_dev->max_pkt_size);

	if (pkt_dev->n_imix_entries > 0) {
		seq_puts(seq, "     imix_weights: ");
		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
			seq_printf(seq, "%llu,%llu ",
				   pkt_dev->imix_entries[i].size,
				   pkt_dev->imix_entries[i].weight);
		}
		seq_puts(seq, "\n");
	}

	seq_printf(seq,
		   "     frags: %d  delay: %llu  clone_skb: %d  ifname: %s\n",
		   pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
		   pkt_dev->clone_skb, pkt_dev->odevname);

	seq_printf(seq, "     flows: %u flowlen: %u\n", pkt_dev->cflows,
		   pkt_dev->lflow);

	seq_printf(seq,
		   "     queue_map_min: %u  queue_map_max: %u\n",
		   pkt_dev->queue_map_min,
		   pkt_dev->queue_map_max);

	if (pkt_dev->skb_priority)
		seq_printf(seq, "     skb_priority: %u\n",
			   pkt_dev->skb_priority);

	if (pkt_dev->flags & F_IPV6) {
		seq_printf(seq,
			   "     saddr: %pI6c  min_saddr: %pI6c  max_saddr: %pI6c\n"
			   "     daddr: %pI6c  min_daddr: %pI6c  max_daddr: %pI6c\n",
			   &pkt_dev->in6_saddr,
			   &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
			   &pkt_dev->in6_daddr,
			   &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
	} else {
		seq_printf(seq,
			   "     dst_min: %s  dst_max: %s\n",
			   pkt_dev->dst_min, pkt_dev->dst_max);
		seq_printf(seq,
			   "     src_min: %s  src_max: %s\n",
			   pkt_dev->src_min, pkt_dev->src_max);
	}

	seq_puts(seq, "     src_mac: ");

	seq_printf(seq, "%pM ",
		   is_zero_ether_addr(pkt_dev->src_mac) ?
			     pkt_dev->odev->dev_addr : pkt_dev->src_mac);

	seq_puts(seq, "dst_mac: ");
	seq_printf(seq, "%pM\n", pkt_dev->dst_mac);

	seq_printf(seq,
		   "     udp_src_min: %d  udp_src_max: %d"
		   "  udp_dst_min: %d  udp_dst_max: %d\n",
		   pkt_dev->udp_src_min, pkt_dev->udp_src_max,
		   pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);

	seq_printf(seq,
		   "     src_mac_count: %d  dst_mac_count: %d\n",
		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);

	if (pkt_dev->nr_labels) {
		seq_puts(seq, "     mpls: ");
		for (i = 0; i < pkt_dev->nr_labels; i++)
			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
				   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
	}

	if (pkt_dev->vlan_id != 0xffff)
		seq_printf(seq, "     vlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
			   pkt_dev->vlan_id, pkt_dev->vlan_p,
			   pkt_dev->vlan_cfi);

	if (pkt_dev->svlan_id != 0xffff)
		seq_printf(seq, "     svlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
			   pkt_dev->svlan_id, pkt_dev->svlan_p,
			   pkt_dev->svlan_cfi);

	if (pkt_dev->tos)
		seq_printf(seq, "     tos: 0x%02x\n", pkt_dev->tos);

	if (pkt_dev->traffic_class)
		seq_printf(seq, "     traffic_class: 0x%02x\n", pkt_dev->traffic_class);

	if (pkt_dev->burst > 1)
		seq_printf(seq, "     burst: %d\n", pkt_dev->burst);

	if (pkt_dev->node >= 0)
		seq_printf(seq, "     node: %d\n", pkt_dev->node);

	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
		seq_puts(seq, "     xmit_mode: netif_receive\n");
	else if (pkt_dev->xmit_mode == M_QUEUE_XMIT)
		seq_puts(seq, "     xmit_mode: xmit_queue\n");

	seq_puts(seq, "     Flags: ");

	for (i = 0; i < NR_PKT_FLAGS; i++) {
		if (i == FLOW_SEQ_SHIFT)
			if (!pkt_dev->cflows)
				continue;

		if (pkt_dev->flags & (1 << i))
			seq_printf(seq, "%s ", pkt_flag_names[i]);
		else if (i == FLOW_SEQ_SHIFT)
			seq_puts(seq, "FLOW_RND ");

#ifdef CONFIG_XFRM
		if (i == IPSEC_SHIFT && pkt_dev->spi)
			seq_printf(seq, "spi:%u", pkt_dev->spi);
#endif
	}

	seq_puts(seq, "\n");

	/* not really stopped, more like last-running-at */
	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
	idle = pkt_dev->idle_acc;
	do_div(idle, NSEC_PER_USEC);

	seq_printf(seq,
		   "Current:\n     pkts-sofar: %llu  errors: %llu\n",
		   (unsigned long long)pkt_dev->sofar,
		   (unsigned long long)pkt_dev->errors);

	if (pkt_dev->n_imix_entries > 0) {
		int i;

		seq_puts(seq, "     imix_size_counts: ");
		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
			seq_printf(seq, "%llu,%llu ",
				   pkt_dev->imix_entries[i].size,
				   pkt_dev->imix_entries[i].count_so_far);
		}
		seq_puts(seq, "\n");
	}

	seq_printf(seq,
		   "     started: %lluus  stopped: %lluus idle: %lluus\n",
		   (unsigned long long) ktime_to_us(pkt_dev->started_at),
		   (unsigned long long) ktime_to_us(stopped),
		   (unsigned long long) idle);

	seq_printf(seq,
		   "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
		   pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
		   pkt_dev->cur_src_mac_offset);

	if (pkt_dev->flags & F_IPV6) {
		seq_printf(seq, "     cur_saddr: %pI6c  cur_daddr: %pI6c\n",
				&pkt_dev->cur_in6_saddr,
				&pkt_dev->cur_in6_daddr);
	} else
		seq_printf(seq, "     cur_saddr: %pI4  cur_daddr: %pI4\n",
			   &pkt_dev->cur_saddr, &pkt_dev->cur_daddr);

	seq_printf(seq, "     cur_udp_dst: %d  cur_udp_src: %d\n",
		   pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);

	seq_printf(seq, "     cur_queue_map: %u\n", pkt_dev->cur_queue_map);

	seq_printf(seq, "     flows: %u\n", pkt_dev->nflows);

	if (pkt_dev->result[0])
		seq_printf(seq, "Result: %s\n", pkt_dev->result);
	else
		seq_puts(seq, "Result: Idle\n");

	return 0;
}
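/*
 * The same per-device proc file rendered above is how configuration and
 * results are read back, e.g. (illustrative device name):
 *
 *	cat /proc/net/pktgen/eth0
 *
 * prints the "Params:", "Current:" and "Result:" sections assembled by
 * pktgen_if_show().
 */
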
static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
		     __u32 *num)
{
	int i = 0;
	*num = 0;

	for (; i < maxlen; i++) {
		int value;
		char c;
		*num <<= 4;
		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		value = hex_to_bin(c);
		if (value >= 0)
			*num |= value;
		else
			break;
	}
	return i;
}

static int count_trail_chars(const char __user * user_buffer,
			     unsigned int maxlen)
{
	int i;

	for (i = 0; i < maxlen; i++) {
		char c;
		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		switch (c) {
		case '\"':
		case '\n':
		case '\r':
		case '\t':
		case ' ':
		case '=':
			break;
		default:
			goto done;
		}
	}
done:
	return i;
}

static long num_arg(const char __user *user_buffer, unsigned long maxlen,
		    unsigned long *num)
{
	int i;
	*num = 0;

	for (i = 0; i < maxlen; i++) {
		char c;
		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		if ((c >= '0') && (c <= '9')) {
			*num *= 10;
			*num += c - '0';
		} else
			break;
	}
	return i;
}

static int strn_len(const char __user * user_buffer, unsigned int maxlen)
{
	int i;

	for (i = 0; i < maxlen; i++) {
		char c;
		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		switch (c) {
		case '\"':
		case '\n':
		case '\r':
		case '\t':
		case ' ':
			goto done_str;
		default:
			break;
		}
	}
done_str:
	return i;
}

/* Parses imix entries from user buffer.
 * The user buffer should consist of imix entries separated by spaces
 * where each entry consists of size and weight delimited by commas.
 * "size1,weight_1 size2,weight_2 ... size_n,weight_n" for example.
 */
static ssize_t get_imix_entries(const char __user *buffer,
				struct pktgen_dev *pkt_dev)
{
	const int max_digits = 10;
	int i = 0;
	long len;
	char c;

	pkt_dev->n_imix_entries = 0;

	do {
		unsigned long weight;
		unsigned long size;

		/* Bound-check before the stores below, which index the
		 * array with n_imix_entries.
		 */
		if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
			return -E2BIG;

		len = num_arg(&buffer[i], max_digits, &size);
		if (len < 0)
			return len;
		i += len;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		/* Check for comma between size_i and weight_i */
		if (c != ',')
			return -EINVAL;
		i++;

		if (size < 14 + 20 + 8)
			size = 14 + 20 + 8;

		len = num_arg(&buffer[i], max_digits, &weight);
		if (len < 0)
			return len;
		if (weight <= 0)
			return -EINVAL;

		pkt_dev->imix_entries[pkt_dev->n_imix_entries].size = size;
		pkt_dev->imix_entries[pkt_dev->n_imix_entries].weight = weight;

		i += len;
		if (get_user(c, &buffer[i]))
			return -EFAULT;

		i++;
		pkt_dev->n_imix_entries++;
	} while (c == ' ');

	return i;
}

static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
{
	unsigned int n = 0;
	char c;
	ssize_t i = 0;
	int len;

	pkt_dev->nr_labels = 0;
	do {
		__u32 tmp;
		len = hex32_arg(&buffer[i], 8, &tmp);
		if (len <= 0)
			return len;
		pkt_dev->labels[n] = htonl(tmp);
		if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM)
			pkt_dev->flags |= F_MPLS_RND;
		i += len;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		i++;
		n++;
		if (n >= MAX_MPLS_LABELS)
			return -E2BIG;
	} while (c == ',');

	pkt_dev->nr_labels = n;
	return i;
}

static __u32 pktgen_read_flag(const char *f, bool *disable)
{
	__u32 i;

	if (f[0] == '!') {
		*disable = true;
		f++;
	}

	for (i = 0; i < NR_PKT_FLAGS; i++) {
		if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT)
			continue;

		/* allow only disabling ipv6 flag */
		if (!*disable && i == IPV6_SHIFT)
			continue;

		if (strcmp(f, pkt_flag_names[i]) == 0)
			return 1 << i;
	}

	if (strcmp(f, "FLOW_RND") == 0) {
		*disable = !*disable;
		return F_FLOW_SEQ;
	}

	return 0;
}
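/*
 * Flags parsed by pktgen_read_flag() are toggled through the per-device
 * proc file, e.g. (illustrative device name):
 *
 *	echo "flag IPSRC_RND"  > /proc/net/pktgen/eth0
 *	echo "flag !IPSRC_RND" > /proc/net/pktgen/eth0	('!' clears a flag)
 */
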
static ssize_t pktgen_if_write(struct file *file,
			       const char __user * user_buffer, size_t count,
			       loff_t * offset)
{
	struct seq_file *seq = file->private_data;
	struct pktgen_dev *pkt_dev = seq->private;
	int i, max, len;
	char name[16], valstr[32];
	unsigned long value = 0;
	char *pg_result = NULL;
	int tmp = 0;
	char buf[128];

	pg_result = &(pkt_dev->result[0]);

	if (count < 1) {
		pr_warn("wrong command format\n");
		return -EINVAL;
	}

	max = count;
	tmp = count_trail_chars(user_buffer, max);
	if (tmp < 0) {
		pr_warn("illegal format\n");
		return tmp;
	}
	i = tmp;

	/* Read variable name */

	len = strn_len(&user_buffer[i], sizeof(name) - 1);
	if (len < 0)
		return len;

	memset(name, 0, sizeof(name));
	if (copy_from_user(name, &user_buffer[i], len))
		return -EFAULT;
	i += len;

	max = count - i;
	len = count_trail_chars(&user_buffer[i], max);
	if (len < 0)
		return len;

	i += len;

	if (debug) {
		size_t copy = min_t(size_t, count + 1, 1024);
		char *tp = strndup_user(user_buffer, copy);

		if (IS_ERR(tp))
			return PTR_ERR(tp);

		pr_debug("%s,%zu buffer -:%s:-\n", name, count, tp);
		kfree(tp);
	}

	if (!strcmp(name, "min_pkt_size")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->min_pkt_size) {
			pkt_dev->min_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: min_pkt_size=%d",
			pkt_dev->min_pkt_size);
		return count;
	}

	if (!strcmp(name, "max_pkt_size")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->max_pkt_size) {
			pkt_dev->max_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: max_pkt_size=%d",
			pkt_dev->max_pkt_size);
		return count;
	}

	/* Shortcut for min = max */

	if (!strcmp(name, "pkt_size")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->min_pkt_size) {
			pkt_dev->min_pkt_size = value;
			pkt_dev->max_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: pkt_size=%d", pkt_dev->min_pkt_size);
		return count;
	}

	if (!strcmp(name, "imix_weights")) {
		if (pkt_dev->clone_skb > 0)
			return -EINVAL;

		len = get_imix_entries(&user_buffer[i], pkt_dev);
		if (len < 0)
			return len;

		fill_imix_distribution(pkt_dev);

		i += len;
		return count;
	}

	if (!strcmp(name, "debug")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		debug = value;
		sprintf(pg_result, "OK: debug=%u", debug);
		return count;
	}

	if (!strcmp(name, "frags")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->nfrags = value;
		sprintf(pg_result, "OK: frags=%d", pkt_dev->nfrags);
		return count;
	}
	if (!strcmp(name, "delay")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value == 0x7FFFFFFF)
			pkt_dev->delay = ULLONG_MAX;
		else
			pkt_dev->delay = (u64)value;

		sprintf(pg_result, "OK: delay=%llu",
			(unsigned long long) pkt_dev->delay);
		return count;
	}
	if (!strcmp(name, "rate")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (!value)
			return len;
		pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		sprintf(pg_result, "OK: rate=%lu", value);
		return count;
	}
	if (!strcmp(name, "ratep")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (!value)
			return len;
		pkt_dev->delay = NSEC_PER_SEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		sprintf(pg_result, "OK: ratep=%lu", value);
		return count;
	}
	if (!strcmp(name, "udp_src_min")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_src_min) {
			pkt_dev->udp_src_min = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min);
		return count;
	}
	if (!strcmp(name, "udp_dst_min")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_dst_min) {
			pkt_dev->udp_dst_min = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min);
		return count;
	}
	if (!strcmp(name, "udp_src_max")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_src_max) {
			pkt_dev->udp_src_max = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max);
		return count;
	}
	if (!strcmp(name, "udp_dst_max")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value != pkt_dev->udp_dst_max) {
			pkt_dev->udp_dst_max = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max);
		return count;
	}
	if (!strcmp(name, "clone_skb")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;
		/* clone_skb is not supported for netif_receive xmit_mode and
		 * IMIX mode.
		 */
		if ((value > 0) &&
		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
			return -ENOTSUPP;
		if (value > 0 && pkt_dev->n_imix_entries > 0)
			return -EINVAL;

		i += len;
		pkt_dev->clone_skb = value;

		sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb);
		return count;
	}
	if (!strcmp(name, "count")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->count = value;
		sprintf(pg_result, "OK: count=%llu",
			(unsigned long long)pkt_dev->count);
		return count;
	}
	if (!strcmp(name, "src_mac_count")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (pkt_dev->src_mac_count != value) {
			pkt_dev->src_mac_count = value;
			pkt_dev->cur_src_mac_offset = 0;
		}
		sprintf(pg_result, "OK: src_mac_count=%d",
			pkt_dev->src_mac_count);
		return count;
	}
	if (!strcmp(name, "dst_mac_count")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (pkt_dev->dst_mac_count != value) {
			pkt_dev->dst_mac_count = value;
			pkt_dev->cur_dst_mac_offset = 0;
		}
		sprintf(pg_result, "OK: dst_mac_count=%d",
			pkt_dev->dst_mac_count);
		return count;
	}
	if (!strcmp(name, "burst")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value > 1) &&
		    ((pkt_dev->xmit_mode == M_QUEUE_XMIT) ||
		     ((pkt_dev->xmit_mode == M_START_XMIT) &&
		     (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
			return -ENOTSUPP;
		pkt_dev->burst = value < 1 ? 1 : value;
		sprintf(pg_result, "OK: burst=%u", pkt_dev->burst);
		return count;
	}
	if (!strcmp(name, "node")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;

		if (node_possible(value)) {
			pkt_dev->node = value;
			sprintf(pg_result, "OK: node=%d", pkt_dev->node);
			if (pkt_dev->page) {
				put_page(pkt_dev->page);
				pkt_dev->page = NULL;
			}
		} else {
			sprintf(pg_result, "ERROR: node not possible");
		}
		return count;
	}
	if (!strcmp(name, "xmit_mode")) {
		char f[32];

		memset(f, 0, 32);
		len = strn_len(&user_buffer[i], sizeof(f) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;
		i += len;

		if (strcmp(f, "start_xmit") == 0) {
			pkt_dev->xmit_mode = M_START_XMIT;
		} else if (strcmp(f, "netif_receive") == 0) {
			/* clone_skb set earlier, not supported in this mode */
			if (pkt_dev->clone_skb > 0)
				return -ENOTSUPP;

			pkt_dev->xmit_mode = M_NETIF_RECEIVE;

			/* make sure new packet is allocated every time
			 * pktgen_xmit() is called
			 */
			pkt_dev->last_ok = 1;
		} else if (strcmp(f, "queue_xmit") == 0) {
			pkt_dev->xmit_mode = M_QUEUE_XMIT;
			pkt_dev->last_ok = 1;
		} else {
			sprintf(pg_result,
				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
				f, "start_xmit, netif_receive, queue_xmit\n");
			return count;
		}
		sprintf(pg_result, "OK: xmit_mode=%s", f);
		return count;
	}
	if (!strcmp(name, "flag")) {
		__u32 flag;
		char f[32];
		bool disable = false;

		memset(f, 0, 32);
		len = strn_len(&user_buffer[i], sizeof(f) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;
		i += len;

		flag = pktgen_read_flag(f, &disable);

		if (flag) {
			if (disable)
				pkt_dev->flags &= ~flag;
			else
				pkt_dev->flags |= flag;
		} else {
			sprintf(pg_result,
				"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
				f,
				"IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
				"MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
				"MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
				"QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
				"NO_TIMESTAMP, "
#ifdef CONFIG_XFRM
				"IPSEC, "
#endif
				"NODE_ALLOC\n");
			return count;
		}
		sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
		return count;
	}
	if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->dst_min) != 0) {
			memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min));
			strcpy(pkt_dev->dst_min, buf);
			pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
			pkt_dev->cur_daddr = pkt_dev->daddr_min;
		}
		if (debug)
			pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);
		i += len;
		sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
		return count;
	}
	if (!strcmp(name, "dst_max")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->dst_max) != 0) {
			memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max));
			strcpy(pkt_dev->dst_max, buf);
			pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
			pkt_dev->cur_daddr = pkt_dev->daddr_max;
		}
		if (debug)
			pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);
		i += len;
		sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
		return count;
	}
	if (!strcmp(name, "dst6")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);

		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;

		if (debug)
			pr_debug("dst6 set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: dst6=%s", buf);
		return count;
	}
	if (!strcmp(name, "dst6_min")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);

		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
		if (debug)
			pr_debug("dst6_min set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: dst6_min=%s", buf);
		return count;
	}
	if (!strcmp(name, "dst6_max")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);

		if (debug)
			pr_debug("dst6_max set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: dst6_max=%s", buf);
		return count;
	}
	if (!strcmp(name, "src6")) {
		len = strn_len(&user_buffer[i], sizeof(buf) - 1);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);

		pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr;

		if (debug)
			pr_debug("src6 set to: %s\n", buf);

		i += len;
		sprintf(pg_result, "OK: src6=%s", buf);
		return count;
	}
	if (!strcmp(name, "src_min")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->src_min) != 0) {
			memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min));
			strcpy(pkt_dev->src_min, buf);
			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
			pkt_dev->cur_saddr = pkt_dev->saddr_min;
		}
		if (debug)
			pr_debug("src_min set to: %s\n", pkt_dev->src_min);
		i += len;
		sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min);
		return count;
	}
	if (!strcmp(name, "src_max")) {
		len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->src_max) != 0) {
			memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max));
			strcpy(pkt_dev->src_max, buf);
			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
			pkt_dev->cur_saddr = pkt_dev->saddr_max;
		}
		if (debug)
			pr_debug("src_max set to: %s\n", pkt_dev->src_max);
		i += len;
		sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max);
		return count;
	}
	if (!strcmp(name, "dst_mac")) {
		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
		if (len < 0)
			return len;

		memset(valstr, 0, sizeof(valstr));
		if (copy_from_user(valstr, &user_buffer[i], len))
			return -EFAULT;

		if (!mac_pton(valstr, pkt_dev->dst_mac))
			return -EINVAL;
		/* Set up Dest MAC */
		ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac);

		sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
		return count;
	}
	if (!strcmp(name, "src_mac")) {
		len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
		if (len < 0)
			return len;

		memset(valstr, 0, sizeof(valstr));
		if (copy_from_user(valstr, &user_buffer[i], len))
			return -EFAULT;

		if (!mac_pton(valstr, pkt_dev->src_mac))
			return -EINVAL;
		/* Set up Src MAC */
		ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac);

		sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
		return count;
	}

	if (!strcmp(name, "clear_counters")) {
		pktgen_clear_counters(pkt_dev);
		sprintf(pg_result, "OK: Clearing counters.\n");
		return count;
	}

	if (!strcmp(name, "flows")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		if (value > MAX_CFLOWS)
			value = MAX_CFLOWS;

		pkt_dev->cflows = value;
		sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows);
		return count;
	}
#ifdef CONFIG_XFRM
	if (!strcmp(name, "spi")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->spi = value;
		sprintf(pg_result, "OK: spi=%u", pkt_dev->spi);
		return count;
	}
#endif
	if (!strcmp(name, "flowlen")) {
		len = num_arg(&user_buffer[i], 10, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->lflow = value;
		sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
		return count;
	}

	if (!strcmp(name, "queue_map_min")) {
		len = num_arg(&user_buffer[i], 5, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->queue_map_min = value;
		sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
		return count;
	}

	if (!strcmp(name, "queue_map_max")) {
		len = num_arg(&user_buffer[i], 5, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->queue_map_max = value;
		sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
		return count;
	}

	if (!strcmp(name, "mpls")) {
		unsigned int n, cnt;

		len = get_labels(&user_buffer[i], pkt_dev);
		if (len < 0)
			return len;
		i += len;
		cnt = sprintf(pg_result, "OK: mpls=");
		for (n = 0; n < pkt_dev->nr_labels; n++)
			cnt += sprintf(pg_result + cnt,
				       "%08x%s", ntohl(pkt_dev->labels[n]),
				       n == pkt_dev->nr_labels-1 ? "" : ",");

		if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) {
			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN auto turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "vlan_id")) {
		len = num_arg(&user_buffer[i], 4, &value);
		if (len < 0)
			return len;

		i += len;
		if (value <= 4095) {
			pkt_dev->vlan_id = value;  /* turn on VLAN */

			if (debug)
				pr_debug("VLAN turned on\n");

			if (debug && pkt_dev->nr_labels)
				pr_debug("MPLS auto turned off\n");

			pkt_dev->nr_labels = 0;    /* turn off MPLS */
			sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
		} else {
			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "vlan_p")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
			pkt_dev->vlan_p = value;
			sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p);
		} else {
			sprintf(pg_result, "ERROR: vlan_p must be 0-7");
		}
		return count;
	}

	if (!strcmp(name, "vlan_cfi")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
			pkt_dev->vlan_cfi = value;
			sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi);
		} else {
			sprintf(pg_result, "ERROR: vlan_cfi must be 0-1");
		}
		return count;
	}

	if (!strcmp(name, "svlan_id")) {
		len = num_arg(&user_buffer[i], 4, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
			pkt_dev->svlan_id = value;  /* turn on SVLAN */

			if (debug)
				pr_debug("SVLAN turned on\n");

			if (debug && pkt_dev->nr_labels)
				pr_debug("MPLS auto turned off\n");

			pkt_dev->nr_labels = 0;    /* turn off MPLS */
			sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
		} else {
			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "svlan_p")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
			pkt_dev->svlan_p = value;
			sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p);
		} else {
			sprintf(pg_result, "ERROR: svlan_p must be 0-7");
		}
		return count;
	}

	if (!strcmp(name, "svlan_cfi")) {
		len = num_arg(&user_buffer[i], 1, &value);
		if (len < 0)
			return len;

		i += len;
		if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
			pkt_dev->svlan_cfi = value;
			sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi);
		} else {
			sprintf(pg_result, "ERROR: svlan_cfi must be 0-1");
		}
		return count;
	}

	if (!strcmp(name, "tos")) {
		__u32 tmp_value = 0;
		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
		if (len < 0)
			return len;

		i += len;
		if (len == 2) {
			pkt_dev->tos = tmp_value;
			sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos);
		} else {
			sprintf(pg_result, "ERROR: tos must be 00-ff");
		}
		return count;
	}

	if (!strcmp(name, "traffic_class")) {
		__u32 tmp_value = 0;
		len = hex32_arg(&user_buffer[i], 2, &tmp_value);
		if (len < 0)
			return len;

		i += len;
		if (len == 2) {
			pkt_dev->traffic_class = tmp_value;
			sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
		} else {
			sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
		}
		return count;
	}

	if (!strcmp(name, "skb_priority")) {
		len = num_arg(&user_buffer[i], 9, &value);
		if (len < 0)
			return len;

		i += len;
		pkt_dev->skb_priority = value;
		sprintf(pg_result, "OK: skb_priority=%i",
			pkt_dev->skb_priority);
		return count;
	}

	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
	return -EINVAL;
}

static int pktgen_if_open(struct inode *inode, struct file *file)
{
	return single_open(file, pktgen_if_show, pde_data(inode));
}

static const struct proc_ops pktgen_if_proc_ops = {
	.proc_open	= pktgen_if_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= pktgen_if_write,
	.proc_release	= single_release,
};

static int pktgen_thread_show(struct seq_file *seq, void *v)
{
	struct pktgen_thread *t = seq->private;
	const struct pktgen_dev *pkt_dev;

	BUG_ON(!t);

	seq_puts(seq, "Running: ");

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	seq_puts(seq, "\nStopped: ");

	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (!pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	if (t->result[0])
		seq_printf(seq, "\nResult: %s\n", t->result);
	else
		seq_puts(seq, "\nResult: NA\n");

	rcu_read_unlock();

	return 0;
}
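/*
 * The per-thread proc files accept the thread commands handled below, e.g.
 * (illustrative names; an "eth0@1"-style suffix lets the same netdevice be
 * added to several threads, see pktgen_dev_get_by_name()):
 *
 *	echo "add_device eth0@1" > /proc/net/pktgen/kpktgend_1
 *	echo "rem_device_all"    > /proc/net/pktgen/kpktgend_1
 */
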
struct pktgen_thread *t = seq->private; 1861 int i, max, len, ret; 1862 char name[40]; 1863 char *pg_result; 1864 1865 if (count < 1) { 1866 // sprintf(pg_result, "Wrong command format"); 1867 return -EINVAL; 1868 } 1869 1870 max = count; 1871 len = count_trail_chars(user_buffer, max); 1872 if (len < 0) 1873 return len; 1874 1875 i = len; 1876 1877 /* Read variable name */ 1878 1879 len = strn_len(&user_buffer[i], sizeof(name) - 1); 1880 if (len < 0) 1881 return len; 1882 1883 memset(name, 0, sizeof(name)); 1884 if (copy_from_user(name, &user_buffer[i], len)) 1885 return -EFAULT; 1886 i += len; 1887 1888 max = count - i; 1889 len = count_trail_chars(&user_buffer[i], max); 1890 if (len < 0) 1891 return len; 1892 1893 i += len; 1894 1895 if (debug) 1896 pr_debug("t=%s, count=%lu\n", name, (unsigned long)count); 1897 1898 if (!t) { 1899 pr_err("ERROR: No thread\n"); 1900 ret = -EINVAL; 1901 goto out; 1902 } 1903 1904 pg_result = &(t->result[0]); 1905 1906 if (!strcmp(name, "add_device")) { 1907 char f[32]; 1908 memset(f, 0, 32); 1909 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1910 if (len < 0) { 1911 ret = len; 1912 goto out; 1913 } 1914 if (copy_from_user(f, &user_buffer[i], len)) 1915 return -EFAULT; 1916 i += len; 1917 mutex_lock(&pktgen_thread_lock); 1918 ret = pktgen_add_device(t, f); 1919 mutex_unlock(&pktgen_thread_lock); 1920 if (!ret) { 1921 ret = count; 1922 sprintf(pg_result, "OK: add_device=%s", f); 1923 } else 1924 sprintf(pg_result, "ERROR: can not add device %s", f); 1925 goto out; 1926 } 1927 1928 if (!strcmp(name, "rem_device_all")) { 1929 mutex_lock(&pktgen_thread_lock); 1930 t->control |= T_REMDEVALL; 1931 mutex_unlock(&pktgen_thread_lock); 1932 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 1933 ret = count; 1934 sprintf(pg_result, "OK: rem_device_all"); 1935 goto out; 1936 } 1937 1938 if (!strcmp(name, "max_before_softirq")) { 1939 sprintf(pg_result, "OK: Note! 
max_before_softirq is obsoleted -- Do not use"); 1940 ret = count; 1941 goto out; 1942 } 1943 1944 ret = -EINVAL; 1945 out: 1946 return ret; 1947 } 1948 1949 static int pktgen_thread_open(struct inode *inode, struct file *file) 1950 { 1951 return single_open(file, pktgen_thread_show, pde_data(inode)); 1952 } 1953 1954 static const struct proc_ops pktgen_thread_proc_ops = { 1955 .proc_open = pktgen_thread_open, 1956 .proc_read = seq_read, 1957 .proc_lseek = seq_lseek, 1958 .proc_write = pktgen_thread_write, 1959 .proc_release = single_release, 1960 }; 1961 1962 /* Think find or remove for NN */ 1963 static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn, 1964 const char *ifname, int remove) 1965 { 1966 struct pktgen_thread *t; 1967 struct pktgen_dev *pkt_dev = NULL; 1968 bool exact = (remove == FIND); 1969 1970 list_for_each_entry(t, &pn->pktgen_threads, th_list) { 1971 pkt_dev = pktgen_find_dev(t, ifname, exact); 1972 if (pkt_dev) { 1973 if (remove) { 1974 pkt_dev->removal_mark = 1; 1975 t->control |= T_REMDEV; 1976 } 1977 break; 1978 } 1979 } 1980 return pkt_dev; 1981 } 1982 1983 /* 1984 * mark a device for removal 1985 */ 1986 static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname) 1987 { 1988 struct pktgen_dev *pkt_dev = NULL; 1989 const int max_tries = 10, msec_per_try = 125; 1990 int i = 0; 1991 1992 mutex_lock(&pktgen_thread_lock); 1993 pr_debug("%s: marking %s for removal\n", __func__, ifname); 1994 1995 while (1) { 1996 1997 pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE); 1998 if (pkt_dev == NULL) 1999 break; /* success */ 2000 2001 mutex_unlock(&pktgen_thread_lock); 2002 pr_debug("%s: waiting for %s to disappear....\n", 2003 __func__, ifname); 2004 schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); 2005 mutex_lock(&pktgen_thread_lock); 2006 2007 if (++i >= max_tries) { 2008 pr_err("%s: timed out after waiting %d msec for device %s to be removed\n", 2009 __func__, msec_per_try * i, ifname); 2010 break; 2011 } 2012 2013 } 2014 2015 mutex_unlock(&pktgen_thread_lock); 2016 } 2017 2018 static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev) 2019 { 2020 struct pktgen_thread *t; 2021 2022 mutex_lock(&pktgen_thread_lock); 2023 2024 list_for_each_entry(t, &pn->pktgen_threads, th_list) { 2025 struct pktgen_dev *pkt_dev; 2026 2027 if_lock(t); 2028 list_for_each_entry(pkt_dev, &t->if_list, list) { 2029 if (pkt_dev->odev != dev) 2030 continue; 2031 2032 proc_remove(pkt_dev->entry); 2033 2034 pkt_dev->entry = proc_create_data(dev->name, 0600, 2035 pn->proc_dir, 2036 &pktgen_if_proc_ops, 2037 pkt_dev); 2038 if (!pkt_dev->entry) 2039 pr_err("can't move proc entry for '%s'\n", 2040 dev->name); 2041 break; 2042 } 2043 if_unlock(t); 2044 } 2045 mutex_unlock(&pktgen_thread_lock); 2046 } 2047 2048 static int pktgen_device_event(struct notifier_block *unused, 2049 unsigned long event, void *ptr) 2050 { 2051 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2052 struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id); 2053 2054 if (pn->pktgen_exiting) 2055 return NOTIFY_DONE; 2056 2057 /* It is OK that we do not hold the group lock right now, 2058 * as we run under the RTNL lock. 
2059 */ 2060 2061 switch (event) { 2062 case NETDEV_CHANGENAME: 2063 pktgen_change_name(pn, dev); 2064 break; 2065 2066 case NETDEV_UNREGISTER: 2067 pktgen_mark_device(pn, dev->name); 2068 break; 2069 } 2070 2071 return NOTIFY_DONE; 2072 } 2073 2074 static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn, 2075 struct pktgen_dev *pkt_dev, 2076 const char *ifname) 2077 { 2078 char b[IFNAMSIZ+5]; 2079 int i; 2080 2081 for (i = 0; ifname[i] != '@'; i++) { 2082 if (i == IFNAMSIZ) 2083 break; 2084 2085 b[i] = ifname[i]; 2086 } 2087 b[i] = 0; 2088 2089 return dev_get_by_name(pn->net, b); 2090 } 2091 2092 2093 /* Associate pktgen_dev with a device. */ 2094 2095 static int pktgen_setup_dev(const struct pktgen_net *pn, 2096 struct pktgen_dev *pkt_dev, const char *ifname) 2097 { 2098 struct net_device *odev; 2099 int err; 2100 2101 /* Clean old setups */ 2102 if (pkt_dev->odev) { 2103 netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker); 2104 pkt_dev->odev = NULL; 2105 } 2106 2107 odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname); 2108 if (!odev) { 2109 pr_err("no such netdevice: \"%s\"\n", ifname); 2110 return -ENODEV; 2111 } 2112 2113 if (odev->type != ARPHRD_ETHER && odev->type != ARPHRD_LOOPBACK) { 2114 pr_err("not an ethernet or loopback device: \"%s\"\n", ifname); 2115 err = -EINVAL; 2116 } else if (!netif_running(odev)) { 2117 pr_err("device is down: \"%s\"\n", ifname); 2118 err = -ENETDOWN; 2119 } else { 2120 pkt_dev->odev = odev; 2121 netdev_tracker_alloc(odev, &pkt_dev->dev_tracker, GFP_KERNEL); 2122 return 0; 2123 } 2124 2125 dev_put(odev); 2126 return err; 2127 } 2128 2129 /* Read pkt_dev from the interface and set up internal pktgen_dev 2130 * structure to have the right information to create/send packets 2131 */ 2132 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) 2133 { 2134 int ntxq; 2135 2136 if (!pkt_dev->odev) { 2137 pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n"); 2138 sprintf(pkt_dev->result, 2139 "ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 2140 return; 2141 } 2142 2143 /* make sure that we don't pick a non-existing transmit queue */ 2144 ntxq = pkt_dev->odev->real_num_tx_queues; 2145 2146 if (ntxq <= pkt_dev->queue_map_min) { 2147 pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", 2148 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, 2149 pkt_dev->odevname); 2150 pkt_dev->queue_map_min = (ntxq ?: 1) - 1; 2151 } 2152 if (pkt_dev->queue_map_max >= ntxq) { 2153 pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", 2154 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, 2155 pkt_dev->odevname); 2156 pkt_dev->queue_map_max = (ntxq ?: 1) - 1; 2157 } 2158 2159 /* Default to the interface's mac if not explicitly set. 
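 * pkt_dev->hh[] caches the Ethernet header used on the fast path:
 * bytes 0-5 hold the destination MAC, bytes 6-11 the source MAC.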
 */

	if (is_zero_ether_addr(pkt_dev->src_mac))
		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);

	/* Set up Dest MAC */
	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);

	if (pkt_dev->flags & F_IPV6) {
		int i, set = 0, err = 1;
		struct inet6_dev *idev;

		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
						+ sizeof(struct udphdr)
						+ sizeof(struct pktgen_hdr)
						+ pkt_dev->pkt_overhead;
		}

		for (i = 0; i < sizeof(struct in6_addr); i++)
			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
				set = 1;
				break;
			}

		if (!set) {

			/*
			 * Use the link-level address if unconfigured.
			 *
			 * Use ipv6_get_lladdr() if/when it gets exported.
			 */

			rcu_read_lock();
			idev = __in6_dev_get(pkt_dev->odev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if ((ifp->scope & IFA_LINK) &&
					    !(ifp->flags & IFA_F_TENTATIVE)) {
						pkt_dev->cur_in6_saddr = ifp->addr;
						err = 0;
						break;
					}
				}
				read_unlock_bh(&idev->lock);
			}
			rcu_read_unlock();
			if (err)
				pr_err("ERROR: IPv6 link address not available\n");
		}
	} else {
		if (pkt_dev->min_pkt_size == 0) {
			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
						+ sizeof(struct udphdr)
						+ sizeof(struct pktgen_hdr)
						+ pkt_dev->pkt_overhead;
		}

		pkt_dev->saddr_min = 0;
		pkt_dev->saddr_max = 0;
		if (strlen(pkt_dev->src_min) == 0) {

			struct in_device *in_dev;

			rcu_read_lock();
			in_dev = __in_dev_get_rcu(pkt_dev->odev);
			if (in_dev) {
				const struct in_ifaddr *ifa;

				ifa = rcu_dereference(in_dev->ifa_list);
				if (ifa) {
					pkt_dev->saddr_min = ifa->ifa_address;
					pkt_dev->saddr_max = pkt_dev->saddr_min;
				}
			}
			rcu_read_unlock();
		} else {
			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
		}

		pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
		pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
	}
	/* Initialize current values.
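	 * The first packet is built from the configured minimums;
	 * mod_cur_headers() then walks or randomizes the ranges from here.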
	 */
	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;

	pkt_dev->cur_dst_mac_offset = 0;
	pkt_dev->cur_src_mac_offset = 0;
	pkt_dev->cur_saddr = pkt_dev->saddr_min;
	pkt_dev->cur_daddr = pkt_dev->daddr_min;
	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
	pkt_dev->nflows = 0;
}


static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
	ktime_t start_time, end_time;
	s64 remaining;
	struct hrtimer_sleeper t;

	hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_set_expires(&t.timer, spin_until);

	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
	if (remaining <= 0)
		goto out;

	start_time = ktime_get();
	if (remaining < 100000) {
		/* for small delays (<100us), just loop until limit is reached */
		do {
			end_time = ktime_get();
		} while (ktime_compare(end_time, spin_until) < 0);
	} else {
		do {
			set_current_state(TASK_INTERRUPTIBLE);
			hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);

			if (likely(t.task))
				schedule();

			hrtimer_cancel(&t.timer);
		} while (t.task && pkt_dev->running && !signal_pending(current));
		__set_current_state(TASK_RUNNING);
		end_time = ktime_get();
	}

	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
out:
	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
	destroy_hrtimer_on_stack(&t.timer);
}

static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
{
	pkt_dev->pkt_overhead = 0;
	pkt_dev->pkt_overhead += pkt_dev->nr_labels * sizeof(u32);
	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
}

static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
{
	return !!(pkt_dev->flows[flow].flags & F_INIT);
}

static inline int f_pick(struct pktgen_dev *pkt_dev)
{
	int flow = pkt_dev->curfl;

	if (pkt_dev->flags & F_FLOW_SEQ) {
		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
			/* reset time */
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
			pkt_dev->curfl += 1;
			if (pkt_dev->curfl >= pkt_dev->cflows)
				pkt_dev->curfl = 0; /* reset */
		}
	} else {
		flow = get_random_u32_below(pkt_dev->cflows);
		pkt_dev->curfl = flow;

		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
			pkt_dev->flows[flow].count = 0;
			pkt_dev->flows[flow].flags = 0;
		}
	}

	return pkt_dev->curfl;
}


#ifdef CONFIG_XFRM
/* If there is already an IPsec SA, we keep it as is, else
 * we go look for it ...
 */
#define DUMMY_MARK 0
static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
	struct xfrm_state *x = pkt_dev->flows[flow].x;
	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
	if (!x) {

		if (pkt_dev->spi) {
			/* We need to find the right SA as quickly as possible;
			 * search with minimum criteria to achieve this.
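			 * The state xfrm_state_lookup_byspi() returns is
			 * already refcounted; free_SAs() drops that
			 * reference at teardown.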
2354 */ 2355 x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET); 2356 } else { 2357 /* slow path: we dont already have xfrm_state */ 2358 x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0, 2359 (xfrm_address_t *)&pkt_dev->cur_daddr, 2360 (xfrm_address_t *)&pkt_dev->cur_saddr, 2361 AF_INET, 2362 pkt_dev->ipsmode, 2363 pkt_dev->ipsproto, 0); 2364 } 2365 if (x) { 2366 pkt_dev->flows[flow].x = x; 2367 set_pkt_overhead(pkt_dev); 2368 pkt_dev->pkt_overhead += x->props.header_len; 2369 } 2370 2371 } 2372 } 2373 #endif 2374 static void set_cur_queue_map(struct pktgen_dev *pkt_dev) 2375 { 2376 2377 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 2378 pkt_dev->cur_queue_map = smp_processor_id(); 2379 2380 else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) { 2381 __u16 t; 2382 if (pkt_dev->flags & F_QUEUE_MAP_RND) { 2383 t = get_random_u32_inclusive(pkt_dev->queue_map_min, 2384 pkt_dev->queue_map_max); 2385 } else { 2386 t = pkt_dev->cur_queue_map + 1; 2387 if (t > pkt_dev->queue_map_max) 2388 t = pkt_dev->queue_map_min; 2389 } 2390 pkt_dev->cur_queue_map = t; 2391 } 2392 pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues; 2393 } 2394 2395 /* Increment/randomize headers according to flags and current values 2396 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 2397 */ 2398 static void mod_cur_headers(struct pktgen_dev *pkt_dev) 2399 { 2400 __u32 imn; 2401 __u32 imx; 2402 int flow = 0; 2403 2404 if (pkt_dev->cflows) 2405 flow = f_pick(pkt_dev); 2406 2407 /* Deal with source MAC */ 2408 if (pkt_dev->src_mac_count > 1) { 2409 __u32 mc; 2410 __u32 tmp; 2411 2412 if (pkt_dev->flags & F_MACSRC_RND) 2413 mc = get_random_u32_below(pkt_dev->src_mac_count); 2414 else { 2415 mc = pkt_dev->cur_src_mac_offset++; 2416 if (pkt_dev->cur_src_mac_offset >= 2417 pkt_dev->src_mac_count) 2418 pkt_dev->cur_src_mac_offset = 0; 2419 } 2420 2421 tmp = pkt_dev->src_mac[5] + (mc & 0xFF); 2422 pkt_dev->hh[11] = tmp; 2423 tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2424 pkt_dev->hh[10] = tmp; 2425 tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2426 pkt_dev->hh[9] = tmp; 2427 tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2428 pkt_dev->hh[8] = tmp; 2429 tmp = (pkt_dev->src_mac[1] + (tmp >> 8)); 2430 pkt_dev->hh[7] = tmp; 2431 } 2432 2433 /* Deal with Destination MAC */ 2434 if (pkt_dev->dst_mac_count > 1) { 2435 __u32 mc; 2436 __u32 tmp; 2437 2438 if (pkt_dev->flags & F_MACDST_RND) 2439 mc = get_random_u32_below(pkt_dev->dst_mac_count); 2440 2441 else { 2442 mc = pkt_dev->cur_dst_mac_offset++; 2443 if (pkt_dev->cur_dst_mac_offset >= 2444 pkt_dev->dst_mac_count) { 2445 pkt_dev->cur_dst_mac_offset = 0; 2446 } 2447 } 2448 2449 tmp = pkt_dev->dst_mac[5] + (mc & 0xFF); 2450 pkt_dev->hh[5] = tmp; 2451 tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2452 pkt_dev->hh[4] = tmp; 2453 tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2454 pkt_dev->hh[3] = tmp; 2455 tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2456 pkt_dev->hh[2] = tmp; 2457 tmp = (pkt_dev->dst_mac[1] + (tmp >> 8)); 2458 pkt_dev->hh[1] = tmp; 2459 } 2460 2461 if (pkt_dev->flags & F_MPLS_RND) { 2462 unsigned int i; 2463 for (i = 0; i < pkt_dev->nr_labels; i++) 2464 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2465 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2466 ((__force __be32)get_random_u32() & 2467 htonl(0x000fffff)); 2468 } 2469 2470 if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) { 2471 pkt_dev->vlan_id = 
get_random_u32_below(4096); 2472 } 2473 2474 if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) { 2475 pkt_dev->svlan_id = get_random_u32_below(4096); 2476 } 2477 2478 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 2479 if (pkt_dev->flags & F_UDPSRC_RND) 2480 pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min, 2481 pkt_dev->udp_src_max - 1); 2482 2483 else { 2484 pkt_dev->cur_udp_src++; 2485 if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max) 2486 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 2487 } 2488 } 2489 2490 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) { 2491 if (pkt_dev->flags & F_UDPDST_RND) { 2492 pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min, 2493 pkt_dev->udp_dst_max - 1); 2494 } else { 2495 pkt_dev->cur_udp_dst++; 2496 if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max) 2497 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min; 2498 } 2499 } 2500 2501 if (!(pkt_dev->flags & F_IPV6)) { 2502 2503 imn = ntohl(pkt_dev->saddr_min); 2504 imx = ntohl(pkt_dev->saddr_max); 2505 if (imn < imx) { 2506 __u32 t; 2507 if (pkt_dev->flags & F_IPSRC_RND) 2508 t = get_random_u32_inclusive(imn, imx - 1); 2509 else { 2510 t = ntohl(pkt_dev->cur_saddr); 2511 t++; 2512 if (t > imx) 2513 t = imn; 2514 2515 } 2516 pkt_dev->cur_saddr = htonl(t); 2517 } 2518 2519 if (pkt_dev->cflows && f_seen(pkt_dev, flow)) { 2520 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 2521 } else { 2522 imn = ntohl(pkt_dev->daddr_min); 2523 imx = ntohl(pkt_dev->daddr_max); 2524 if (imn < imx) { 2525 __u32 t; 2526 __be32 s; 2527 if (pkt_dev->flags & F_IPDST_RND) { 2528 2529 do { 2530 t = get_random_u32_inclusive(imn, imx - 1); 2531 s = htonl(t); 2532 } while (ipv4_is_loopback(s) || 2533 ipv4_is_multicast(s) || 2534 ipv4_is_lbcast(s) || 2535 ipv4_is_zeronet(s) || 2536 ipv4_is_local_multicast(s)); 2537 pkt_dev->cur_daddr = s; 2538 } else { 2539 t = ntohl(pkt_dev->cur_daddr); 2540 t++; 2541 if (t > imx) { 2542 t = imn; 2543 } 2544 pkt_dev->cur_daddr = htonl(t); 2545 } 2546 } 2547 if (pkt_dev->cflows) { 2548 pkt_dev->flows[flow].flags |= F_INIT; 2549 pkt_dev->flows[flow].cur_daddr = 2550 pkt_dev->cur_daddr; 2551 #ifdef CONFIG_XFRM 2552 if (pkt_dev->flags & F_IPSEC) 2553 get_ipsec_sa(pkt_dev, flow); 2554 #endif 2555 pkt_dev->nflows++; 2556 } 2557 } 2558 } else { /* IPV6 * */ 2559 2560 if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) { 2561 int i; 2562 2563 /* Only random destinations yet */ 2564 2565 for (i = 0; i < 4; i++) { 2566 pkt_dev->cur_in6_daddr.s6_addr32[i] = 2567 (((__force __be32)get_random_u32() | 2568 pkt_dev->min_in6_daddr.s6_addr32[i]) & 2569 pkt_dev->max_in6_daddr.s6_addr32[i]); 2570 } 2571 } 2572 } 2573 2574 if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) { 2575 __u32 t; 2576 if (pkt_dev->flags & F_TXSIZE_RND) { 2577 t = get_random_u32_inclusive(pkt_dev->min_pkt_size, 2578 pkt_dev->max_pkt_size - 1); 2579 } else { 2580 t = pkt_dev->cur_pkt_size + 1; 2581 if (t > pkt_dev->max_pkt_size) 2582 t = pkt_dev->min_pkt_size; 2583 } 2584 pkt_dev->cur_pkt_size = t; 2585 } else if (pkt_dev->n_imix_entries > 0) { 2586 struct imix_pkt *entry; 2587 __u32 t = get_random_u32_below(IMIX_PRECISION); 2588 __u8 entry_index = pkt_dev->imix_distribution[t]; 2589 2590 entry = &pkt_dev->imix_entries[entry_index]; 2591 entry->count_so_far++; 2592 pkt_dev->cur_pkt_size = entry->size; 2593 } 2594 2595 set_cur_queue_map(pkt_dev); 2596 2597 pkt_dev->flows[flow].count++; 2598 } 2599 2600 static void fill_imix_distribution(struct pktgen_dev *pkt_dev) 2601 { 2602 int 
cumulative_probabilities[MAX_IMIX_ENTRIES];
	int j = 0;
	__u64 cumulative_prob = 0;
	__u64 total_weight = 0;
	int i = 0;

	for (i = 0; i < pkt_dev->n_imix_entries; i++)
		total_weight += pkt_dev->imix_entries[i].weight;

	/* Fill cumulative_probabilities with sum of normalized probabilities */
	for (i = 0; i < pkt_dev->n_imix_entries - 1; i++) {
		cumulative_prob += div64_u64(pkt_dev->imix_entries[i].weight *
					     IMIX_PRECISION,
					     total_weight);
		cumulative_probabilities[i] = cumulative_prob;
	}
	cumulative_probabilities[pkt_dev->n_imix_entries - 1] = 100;

	for (i = 0; i < IMIX_PRECISION; i++) {
		if (i == cumulative_probabilities[j])
			j++;
		pkt_dev->imix_distribution[i] = j;
	}
}

#ifdef CONFIG_XFRM
static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {

	[RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
};

static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
{
	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
	int err = 0;
	struct net *net = dev_net(pkt_dev->odev);

	if (!x)
		return 0;
	/* XXX: we don't support tunnel mode for now until
	 * we resolve the dst issue
	 */
	if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
		return 0;

	/* But when the user specifies a valid SPI, the transformation
	 * supports both transport/tunnel mode + ESP/AH type.
	 */
	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;

	rcu_read_lock_bh();
	err = pktgen_xfrm_outer_mode_output(x, skb);
	rcu_read_unlock_bh();
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
		goto error;
	}
	err = x->type->output(x, skb);
	if (err) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
		goto error;
	}
	spin_lock_bh(&x->lock);
	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
error:
	return err;
}

static void free_SAs(struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->cflows) {
		/* let go of the SAs if we have them */
		int i;
		for (i = 0; i < pkt_dev->cflows; i++) {
			struct xfrm_state *x = pkt_dev->flows[i].x;
			if (x) {
				xfrm_state_put(x);
				pkt_dev->flows[i].x = NULL;
			}
		}
	}
}

static int process_ipsec(struct pktgen_dev *pkt_dev,
			 struct sk_buff *skb, __be16 protocol)
{
	if (pkt_dev->flags & F_IPSEC) {
		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
		int nhead = 0;
		if (x) {
			struct ethhdr *eth;
			struct iphdr *iph;
			int ret;

			nhead = x->props.header_len - skb_headroom(skb);
			if (nhead > 0) {
				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
				if (ret < 0) {
					pr_err("Error expanding ipsec packet %d\n",
					       ret);
					goto err;
				}
			}

			/* ipsec is not expecting ll header */
			skb_pull(skb, ETH_HLEN);
			ret = pktgen_output_ipsec(skb, pkt_dev);
			if (ret) {
				pr_err("Error creating ipsec packet %d\n", ret);
				goto err;
			}
			/* restore ll */
			eth = skb_push(skb, ETH_HLEN);
			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
			eth->h_proto = protocol;

			/* Update IPv4 header len as well as checksum value */
			iph = ip_hdr(skb);
			iph->tot_len = htons(skb->len - ETH_HLEN);
			ip_send_check(iph);
		}
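		/* If no SA is set up for this flow yet, the packet is left
		 * untouched; we still return success below so it is sent
		 * out unmodified.
		 */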
	}
	return 1;
err:
	kfree_skb(skb);
	return 0;
}
#endif

static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
{
	unsigned int i;
	for (i = 0; i < pkt_dev->nr_labels; i++)
		*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;

	mpls--;
	*mpls |= MPLS_STACK_BOTTOM;
}

static inline __be16 build_tci(unsigned int id, unsigned int cfi,
			       unsigned int prio)
{
	return htons(id | (cfi << 12) | (prio << 13));
}

static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
				int datalen)
{
	struct timespec64 timestamp;
	struct pktgen_hdr *pgh;

	pgh = skb_put(skb, sizeof(*pgh));
	datalen -= sizeof(*pgh);

	if (pkt_dev->nfrags <= 0) {
		skb_put_zero(skb, datalen);
	} else {
		int frags = pkt_dev->nfrags;
		int i, len;
		int frag_len;


		if (frags > MAX_SKB_FRAGS)
			frags = MAX_SKB_FRAGS;
		len = datalen - frags * PAGE_SIZE;
		if (len > 0) {
			skb_put_zero(skb, len);
			datalen = frags * PAGE_SIZE;
		}

		i = 0;
		frag_len = (datalen / frags) < PAGE_SIZE ?
			   (datalen / frags) : PAGE_SIZE;
		while (datalen > 0) {
			if (unlikely(!pkt_dev->page)) {
				int node = numa_node_id();

				if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
					node = pkt_dev->node;
				pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
				if (!pkt_dev->page)
					break;
			}
			get_page(pkt_dev->page);

			/* last fragment, fill rest of data */
			if (i == (frags - 1))
				skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
							pkt_dev->page, 0,
							(datalen < PAGE_SIZE ?
							 datalen : PAGE_SIZE));
			else
				skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
							pkt_dev->page, 0, frag_len);

			datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
			skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
			i++;
			skb_shinfo(skb)->nr_frags = i;
		}
	}

	/* Stamp the time and sequence number,
	 * convert them to network byte order
	 */
	pgh->pgh_magic = htonl(PKTGEN_MAGIC);
	pgh->seq_num = htonl(pkt_dev->seq_num);

	if (pkt_dev->flags & F_NO_TIMESTAMP) {
		pgh->tv_sec = 0;
		pgh->tv_usec = 0;
	} else {
		/*
		 * pgh->tv_sec wraps in y2106 when interpreted as unsigned
		 * as done by wireshark, or y2038 when interpreted as signed.
		 * This is probably harmless, but if anyone wants to improve
		 * it, we could introduce a variant that puts 64-bit nanoseconds
		 * into the respective header bytes.
		 * This would also be slightly faster to read.
		 */
		ktime_get_real_ts64(&timestamp);
		pgh->tv_sec = htonl(timestamp.tv_sec);
		pgh->tv_usec = htonl(timestamp.tv_nsec / NSEC_PER_USEC);
	}
}

static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
					struct pktgen_dev *pkt_dev)
{
	unsigned int extralen = LL_RESERVED_SPACE(dev);
	struct sk_buff *skb = NULL;
	unsigned int size;

	size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
	if (pkt_dev->flags & F_NODE) {
		int node = pkt_dev->node >= 0 ?
pkt_dev->node : numa_node_id(); 2841 2842 skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node); 2843 if (likely(skb)) { 2844 skb_reserve(skb, NET_SKB_PAD); 2845 skb->dev = dev; 2846 } 2847 } else { 2848 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT); 2849 } 2850 2851 /* the caller pre-fetches from skb->data and reserves for the mac hdr */ 2852 if (likely(skb)) 2853 skb_reserve(skb, extralen - 16); 2854 2855 return skb; 2856 } 2857 2858 static struct sk_buff *fill_packet_ipv4(struct net_device *odev, 2859 struct pktgen_dev *pkt_dev) 2860 { 2861 struct sk_buff *skb = NULL; 2862 __u8 *eth; 2863 struct udphdr *udph; 2864 int datalen, iplen; 2865 struct iphdr *iph; 2866 __be16 protocol = htons(ETH_P_IP); 2867 __be32 *mpls; 2868 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 2869 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2870 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2871 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2872 u16 queue_map; 2873 2874 if (pkt_dev->nr_labels) 2875 protocol = htons(ETH_P_MPLS_UC); 2876 2877 if (pkt_dev->vlan_id != 0xffff) 2878 protocol = htons(ETH_P_8021Q); 2879 2880 /* Update any of the values, used when we're incrementing various 2881 * fields. 2882 */ 2883 mod_cur_headers(pkt_dev); 2884 queue_map = pkt_dev->cur_queue_map; 2885 2886 skb = pktgen_alloc_skb(odev, pkt_dev); 2887 if (!skb) { 2888 sprintf(pkt_dev->result, "No memory"); 2889 return NULL; 2890 } 2891 2892 prefetchw(skb->data); 2893 skb_reserve(skb, 16); 2894 2895 /* Reserve for ethernet and IP header */ 2896 eth = skb_push(skb, 14); 2897 mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32)); 2898 if (pkt_dev->nr_labels) 2899 mpls_push(mpls, pkt_dev); 2900 2901 if (pkt_dev->vlan_id != 0xffff) { 2902 if (pkt_dev->svlan_id != 0xffff) { 2903 svlan_tci = skb_put(skb, sizeof(__be16)); 2904 *svlan_tci = build_tci(pkt_dev->svlan_id, 2905 pkt_dev->svlan_cfi, 2906 pkt_dev->svlan_p); 2907 svlan_encapsulated_proto = skb_put(skb, 2908 sizeof(__be16)); 2909 *svlan_encapsulated_proto = htons(ETH_P_8021Q); 2910 } 2911 vlan_tci = skb_put(skb, sizeof(__be16)); 2912 *vlan_tci = build_tci(pkt_dev->vlan_id, 2913 pkt_dev->vlan_cfi, 2914 pkt_dev->vlan_p); 2915 vlan_encapsulated_proto = skb_put(skb, sizeof(__be16)); 2916 *vlan_encapsulated_proto = htons(ETH_P_IP); 2917 } 2918 2919 skb_reset_mac_header(skb); 2920 skb_set_network_header(skb, skb->len); 2921 iph = skb_put(skb, sizeof(struct iphdr)); 2922 2923 skb_set_transport_header(skb, skb->len); 2924 udph = skb_put(skb, sizeof(struct udphdr)); 2925 skb_set_queue_mapping(skb, queue_map); 2926 skb->priority = pkt_dev->skb_priority; 2927 2928 memcpy(eth, pkt_dev->hh, 12); 2929 *(__be16 *) & eth[12] = protocol; 2930 2931 /* Eth + IPh + UDPh + mpls */ 2932 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2933 pkt_dev->pkt_overhead; 2934 if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) 2935 datalen = sizeof(struct pktgen_hdr); 2936 2937 udph->source = htons(pkt_dev->cur_udp_src); 2938 udph->dest = htons(pkt_dev->cur_udp_dst); 2939 udph->len = htons(datalen + 8); /* DATA + udphdr */ 2940 udph->check = 0; 2941 2942 iph->ihl = 5; 2943 iph->version = 4; 2944 iph->ttl = 32; 2945 iph->tos = pkt_dev->tos; 2946 iph->protocol = IPPROTO_UDP; /* UDP */ 2947 iph->saddr = pkt_dev->cur_saddr; 2948 iph->daddr = pkt_dev->cur_daddr; 2949 iph->id = htons(pkt_dev->ip_id); 2950 pkt_dev->ip_id++; 2951 iph->frag_off = 0; 2952 iplen = 20 + 8 + datalen; 2953 
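	/* tot_len covers only the IP header, UDP header and payload;
	 * the Ethernet header and any MPLS/VLAN/IPsec words counted in
	 * pkt_overhead stay outside the IP length field.
	 */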
iph->tot_len = htons(iplen); 2954 ip_send_check(iph); 2955 skb->protocol = protocol; 2956 skb->dev = odev; 2957 skb->pkt_type = PACKET_HOST; 2958 2959 pktgen_finalize_skb(pkt_dev, skb, datalen); 2960 2961 if (!(pkt_dev->flags & F_UDPCSUM)) { 2962 skb->ip_summed = CHECKSUM_NONE; 2963 } else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)) { 2964 skb->ip_summed = CHECKSUM_PARTIAL; 2965 skb->csum = 0; 2966 udp4_hwcsum(skb, iph->saddr, iph->daddr); 2967 } else { 2968 __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0); 2969 2970 /* add protocol-dependent pseudo-header */ 2971 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 2972 datalen + 8, IPPROTO_UDP, csum); 2973 2974 if (udph->check == 0) 2975 udph->check = CSUM_MANGLED_0; 2976 } 2977 2978 #ifdef CONFIG_XFRM 2979 if (!process_ipsec(pkt_dev, skb, protocol)) 2980 return NULL; 2981 #endif 2982 2983 return skb; 2984 } 2985 2986 static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2987 struct pktgen_dev *pkt_dev) 2988 { 2989 struct sk_buff *skb = NULL; 2990 __u8 *eth; 2991 struct udphdr *udph; 2992 int datalen, udplen; 2993 struct ipv6hdr *iph; 2994 __be16 protocol = htons(ETH_P_IPV6); 2995 __be32 *mpls; 2996 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 2997 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2998 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2999 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 3000 u16 queue_map; 3001 3002 if (pkt_dev->nr_labels) 3003 protocol = htons(ETH_P_MPLS_UC); 3004 3005 if (pkt_dev->vlan_id != 0xffff) 3006 protocol = htons(ETH_P_8021Q); 3007 3008 /* Update any of the values, used when we're incrementing various 3009 * fields. 
 */
	mod_cur_headers(pkt_dev);
	queue_map = pkt_dev->cur_queue_map;

	skb = pktgen_alloc_skb(odev, pkt_dev);
	if (!skb) {
		sprintf(pkt_dev->result, "No memory");
		return NULL;
	}

	prefetchw(skb->data);
	skb_reserve(skb, 16);

	/* Reserve for ethernet and IP header */
	eth = skb_push(skb, 14);
	mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32));
	if (pkt_dev->nr_labels)
		mpls_push(mpls, pkt_dev);

	if (pkt_dev->vlan_id != 0xffff) {
		if (pkt_dev->svlan_id != 0xffff) {
			svlan_tci = skb_put(skb, sizeof(__be16));
			*svlan_tci = build_tci(pkt_dev->svlan_id,
					       pkt_dev->svlan_cfi,
					       pkt_dev->svlan_p);
			svlan_encapsulated_proto = skb_put(skb,
							   sizeof(__be16));
			*svlan_encapsulated_proto = htons(ETH_P_8021Q);
		}
		vlan_tci = skb_put(skb, sizeof(__be16));
		*vlan_tci = build_tci(pkt_dev->vlan_id,
				      pkt_dev->vlan_cfi,
				      pkt_dev->vlan_p);
		vlan_encapsulated_proto = skb_put(skb, sizeof(__be16));
		*vlan_encapsulated_proto = htons(ETH_P_IPV6);
	}

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->len);
	iph = skb_put(skb, sizeof(struct ipv6hdr));

	skb_set_transport_header(skb, skb->len);
	udph = skb_put(skb, sizeof(struct udphdr));
	skb_set_queue_mapping(skb, queue_map);
	skb->priority = pkt_dev->skb_priority;

	memcpy(eth, pkt_dev->hh, 12);
	*(__be16 *)&eth[12] = protocol;

	/* Eth + IPh + UDPh + mpls */
	datalen = pkt_dev->cur_pkt_size - 14 -
		  sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
		  pkt_dev->pkt_overhead;

	if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
		datalen = sizeof(struct pktgen_hdr);
		net_info_ratelimited("increased datalen to %d\n", datalen);
	}

	udplen = datalen + sizeof(struct udphdr);
	udph->source = htons(pkt_dev->cur_udp_src);
	udph->dest = htons(pkt_dev->cur_udp_dst);
	udph->len = htons(udplen);
	udph->check = 0;

	*(__be32 *)iph = htonl(0x60000000);	/* Version + flow */

	if (pkt_dev->traffic_class) {
		/* Version + traffic class + flow (0) */
		*(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20));
	}

	iph->hop_limit = 32;

	iph->payload_len = htons(udplen);
	iph->nexthdr = IPPROTO_UDP;

	iph->daddr = pkt_dev->cur_in6_daddr;
	iph->saddr = pkt_dev->cur_in6_saddr;

	skb->protocol = protocol;
	skb->dev = odev;
	skb->pkt_type = PACKET_HOST;

	pktgen_finalize_skb(pkt_dev, skb, datalen);

	if (!(pkt_dev->flags & F_UDPCSUM)) {
		skb->ip_summed = CHECKSUM_NONE;
	} else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
	} else {
		__wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);

		/* add protocol-dependent pseudo-header */
		udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);

		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
	}

	return skb;
}

static struct sk_buff *fill_packet(struct net_device *odev,
				   struct pktgen_dev *pkt_dev)
{
	if (pkt_dev->flags & F_IPV6)
		return fill_packet_ipv6(odev, pkt_dev);
	else
return fill_packet_ipv4(odev, pkt_dev); 3123 } 3124 3125 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev) 3126 { 3127 pkt_dev->seq_num = 1; 3128 pkt_dev->idle_acc = 0; 3129 pkt_dev->sofar = 0; 3130 pkt_dev->tx_bytes = 0; 3131 pkt_dev->errors = 0; 3132 } 3133 3134 /* Set up structure for sending pkts, clear counters */ 3135 3136 static void pktgen_run(struct pktgen_thread *t) 3137 { 3138 struct pktgen_dev *pkt_dev; 3139 int started = 0; 3140 3141 func_enter(); 3142 3143 rcu_read_lock(); 3144 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { 3145 3146 /* 3147 * setup odev and create initial packet. 3148 */ 3149 pktgen_setup_inject(pkt_dev); 3150 3151 if (pkt_dev->odev) { 3152 pktgen_clear_counters(pkt_dev); 3153 pkt_dev->skb = NULL; 3154 pkt_dev->started_at = pkt_dev->next_tx = ktime_get(); 3155 3156 set_pkt_overhead(pkt_dev); 3157 3158 strcpy(pkt_dev->result, "Starting"); 3159 pkt_dev->running = 1; /* Cranke yeself! */ 3160 started++; 3161 } else 3162 strcpy(pkt_dev->result, "Error starting"); 3163 } 3164 rcu_read_unlock(); 3165 if (started) 3166 t->control &= ~(T_STOP); 3167 } 3168 3169 static void pktgen_handle_all_threads(struct pktgen_net *pn, u32 flags) 3170 { 3171 struct pktgen_thread *t; 3172 3173 mutex_lock(&pktgen_thread_lock); 3174 3175 list_for_each_entry(t, &pn->pktgen_threads, th_list) 3176 t->control |= (flags); 3177 3178 mutex_unlock(&pktgen_thread_lock); 3179 } 3180 3181 static void pktgen_stop_all_threads(struct pktgen_net *pn) 3182 { 3183 func_enter(); 3184 3185 pktgen_handle_all_threads(pn, T_STOP); 3186 } 3187 3188 static int thread_is_running(const struct pktgen_thread *t) 3189 { 3190 const struct pktgen_dev *pkt_dev; 3191 3192 rcu_read_lock(); 3193 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) 3194 if (pkt_dev->running) { 3195 rcu_read_unlock(); 3196 return 1; 3197 } 3198 rcu_read_unlock(); 3199 return 0; 3200 } 3201 3202 static int pktgen_wait_thread_run(struct pktgen_thread *t) 3203 { 3204 while (thread_is_running(t)) { 3205 3206 /* note: 't' will still be around even after the unlock/lock 3207 * cycle because pktgen_thread threads are only cleared at 3208 * net exit 3209 */ 3210 mutex_unlock(&pktgen_thread_lock); 3211 msleep_interruptible(100); 3212 mutex_lock(&pktgen_thread_lock); 3213 3214 if (signal_pending(current)) 3215 goto signal; 3216 } 3217 return 1; 3218 signal: 3219 return 0; 3220 } 3221 3222 static int pktgen_wait_all_threads_run(struct pktgen_net *pn) 3223 { 3224 struct pktgen_thread *t; 3225 int sig = 1; 3226 3227 /* prevent from racing with rmmod */ 3228 if (!try_module_get(THIS_MODULE)) 3229 return sig; 3230 3231 mutex_lock(&pktgen_thread_lock); 3232 3233 list_for_each_entry(t, &pn->pktgen_threads, th_list) { 3234 sig = pktgen_wait_thread_run(t); 3235 if (sig == 0) 3236 break; 3237 } 3238 3239 if (sig == 0) 3240 list_for_each_entry(t, &pn->pktgen_threads, th_list) 3241 t->control |= (T_STOP); 3242 3243 mutex_unlock(&pktgen_thread_lock); 3244 module_put(THIS_MODULE); 3245 return sig; 3246 } 3247 3248 static void pktgen_run_all_threads(struct pktgen_net *pn) 3249 { 3250 func_enter(); 3251 3252 pktgen_handle_all_threads(pn, T_RUN); 3253 3254 /* Propagate thread->control */ 3255 schedule_timeout_interruptible(msecs_to_jiffies(125)); 3256 3257 pktgen_wait_all_threads_run(pn); 3258 } 3259 3260 static void pktgen_reset_all_threads(struct pktgen_net *pn) 3261 { 3262 func_enter(); 3263 3264 pktgen_handle_all_threads(pn, T_REMDEVALL); 3265 3266 /* Propagate thread->control */ 3267 schedule_timeout_interruptible(msecs_to_jiffies(125)); 
3268 3269 pktgen_wait_all_threads_run(pn); 3270 } 3271 3272 static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 3273 { 3274 __u64 bps, mbps, pps; 3275 char *p = pkt_dev->result; 3276 ktime_t elapsed = ktime_sub(pkt_dev->stopped_at, 3277 pkt_dev->started_at); 3278 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); 3279 3280 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", 3281 (unsigned long long)ktime_to_us(elapsed), 3282 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), 3283 (unsigned long long)ktime_to_us(idle), 3284 (unsigned long long)pkt_dev->sofar, 3285 pkt_dev->cur_pkt_size, nr_frags); 3286 3287 pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC, 3288 ktime_to_ns(elapsed)); 3289 3290 if (pkt_dev->n_imix_entries > 0) { 3291 int i; 3292 struct imix_pkt *entry; 3293 3294 bps = 0; 3295 for (i = 0; i < pkt_dev->n_imix_entries; i++) { 3296 entry = &pkt_dev->imix_entries[i]; 3297 bps += entry->size * entry->count_so_far; 3298 } 3299 bps = div64_u64(bps * 8 * NSEC_PER_SEC, ktime_to_ns(elapsed)); 3300 } else { 3301 bps = pps * 8 * pkt_dev->cur_pkt_size; 3302 } 3303 3304 mbps = bps; 3305 do_div(mbps, 1000000); 3306 p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu", 3307 (unsigned long long)pps, 3308 (unsigned long long)mbps, 3309 (unsigned long long)bps, 3310 (unsigned long long)pkt_dev->errors); 3311 } 3312 3313 /* Set stopped-at timer, remove from running list, do counters & statistics */ 3314 static int pktgen_stop_device(struct pktgen_dev *pkt_dev) 3315 { 3316 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; 3317 3318 if (!pkt_dev->running) { 3319 pr_warn("interface: %s is already stopped\n", 3320 pkt_dev->odevname); 3321 return -EINVAL; 3322 } 3323 3324 pkt_dev->running = 0; 3325 kfree_skb(pkt_dev->skb); 3326 pkt_dev->skb = NULL; 3327 pkt_dev->stopped_at = ktime_get(); 3328 3329 show_results(pkt_dev, nr_frags); 3330 3331 return 0; 3332 } 3333 3334 static struct pktgen_dev *next_to_run(struct pktgen_thread *t) 3335 { 3336 struct pktgen_dev *pkt_dev, *best = NULL; 3337 3338 rcu_read_lock(); 3339 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { 3340 if (!pkt_dev->running) 3341 continue; 3342 if (best == NULL) 3343 best = pkt_dev; 3344 else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0) 3345 best = pkt_dev; 3346 } 3347 rcu_read_unlock(); 3348 3349 return best; 3350 } 3351 3352 static void pktgen_stop(struct pktgen_thread *t) 3353 { 3354 struct pktgen_dev *pkt_dev; 3355 3356 func_enter(); 3357 3358 rcu_read_lock(); 3359 3360 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { 3361 pktgen_stop_device(pkt_dev); 3362 } 3363 3364 rcu_read_unlock(); 3365 } 3366 3367 /* 3368 * one of our devices needs to be removed - find it 3369 * and remove it 3370 */ 3371 static void pktgen_rem_one_if(struct pktgen_thread *t) 3372 { 3373 struct list_head *q, *n; 3374 struct pktgen_dev *cur; 3375 3376 func_enter(); 3377 3378 list_for_each_safe(q, n, &t->if_list) { 3379 cur = list_entry(q, struct pktgen_dev, list); 3380 3381 if (!cur->removal_mark) 3382 continue; 3383 3384 kfree_skb(cur->skb); 3385 cur->skb = NULL; 3386 3387 pktgen_remove_device(t, cur); 3388 3389 break; 3390 } 3391 } 3392 3393 static void pktgen_rem_all_ifs(struct pktgen_thread *t) 3394 { 3395 struct list_head *q, *n; 3396 struct pktgen_dev *cur; 3397 3398 func_enter(); 3399 3400 /* Remove all devices, free mem */ 3401 3402 list_for_each_safe(q, n, &t->if_list) { 3403 cur = list_entry(q, struct pktgen_dev, list); 3404 3405 kfree_skb(cur->skb); 3406 cur->skb = 
NULL; 3407 3408 pktgen_remove_device(t, cur); 3409 } 3410 } 3411 3412 static void pktgen_rem_thread(struct pktgen_thread *t) 3413 { 3414 /* Remove from the thread list */ 3415 remove_proc_entry(t->tsk->comm, t->net->proc_dir); 3416 } 3417 3418 static void pktgen_resched(struct pktgen_dev *pkt_dev) 3419 { 3420 ktime_t idle_start = ktime_get(); 3421 schedule(); 3422 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start)); 3423 } 3424 3425 static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) 3426 { 3427 ktime_t idle_start = ktime_get(); 3428 3429 while (refcount_read(&(pkt_dev->skb->users)) != 1) { 3430 if (signal_pending(current)) 3431 break; 3432 3433 if (need_resched()) 3434 pktgen_resched(pkt_dev); 3435 else 3436 cpu_relax(); 3437 } 3438 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start)); 3439 } 3440 3441 static void pktgen_xmit(struct pktgen_dev *pkt_dev) 3442 { 3443 unsigned int burst = READ_ONCE(pkt_dev->burst); 3444 struct net_device *odev = pkt_dev->odev; 3445 struct netdev_queue *txq; 3446 struct sk_buff *skb; 3447 int ret; 3448 3449 /* If device is offline, then don't send */ 3450 if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) { 3451 pktgen_stop_device(pkt_dev); 3452 return; 3453 } 3454 3455 /* This is max DELAY, this has special meaning of 3456 * "never transmit" 3457 */ 3458 if (unlikely(pkt_dev->delay == ULLONG_MAX)) { 3459 pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX); 3460 return; 3461 } 3462 3463 /* If no skb or clone count exhausted then get new one */ 3464 if (!pkt_dev->skb || (pkt_dev->last_ok && 3465 ++pkt_dev->clone_count >= pkt_dev->clone_skb)) { 3466 /* build a new pkt */ 3467 kfree_skb(pkt_dev->skb); 3468 3469 pkt_dev->skb = fill_packet(odev, pkt_dev); 3470 if (pkt_dev->skb == NULL) { 3471 pr_err("ERROR: couldn't allocate skb in fill_packet\n"); 3472 schedule(); 3473 pkt_dev->clone_count--; /* back out increment, OOM */ 3474 return; 3475 } 3476 pkt_dev->last_pkt_size = pkt_dev->skb->len; 3477 pkt_dev->clone_count = 0; /* reset counter */ 3478 } 3479 3480 if (pkt_dev->delay && pkt_dev->last_ok) 3481 spin(pkt_dev, pkt_dev->next_tx); 3482 3483 if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) { 3484 skb = pkt_dev->skb; 3485 skb->protocol = eth_type_trans(skb, skb->dev); 3486 refcount_add(burst, &skb->users); 3487 local_bh_disable(); 3488 do { 3489 ret = netif_receive_skb(skb); 3490 if (ret == NET_RX_DROP) 3491 pkt_dev->errors++; 3492 pkt_dev->sofar++; 3493 pkt_dev->seq_num++; 3494 if (refcount_read(&skb->users) != burst) { 3495 /* skb was queued by rps/rfs or taps, 3496 * so cannot reuse this skb 3497 */ 3498 WARN_ON(refcount_sub_and_test(burst - 1, &skb->users)); 3499 /* get out of the loop and wait 3500 * until skb is consumed 3501 */ 3502 break; 3503 } 3504 /* skb was 'freed' by stack, so clean few 3505 * bits and reuse it 3506 */ 3507 skb_reset_redirect(skb); 3508 } while (--burst > 0); 3509 goto out; /* Skips xmit_mode M_START_XMIT */ 3510 } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) { 3511 local_bh_disable(); 3512 refcount_inc(&pkt_dev->skb->users); 3513 3514 ret = dev_queue_xmit(pkt_dev->skb); 3515 switch (ret) { 3516 case NET_XMIT_SUCCESS: 3517 pkt_dev->sofar++; 3518 pkt_dev->seq_num++; 3519 pkt_dev->tx_bytes += pkt_dev->last_pkt_size; 3520 break; 3521 case NET_XMIT_DROP: 3522 case NET_XMIT_CN: 3523 /* These are all valid return codes for a qdisc but 3524 * indicate packets are being dropped or will likely 3525 * be dropped soon. 
3526 */ 3527 case NETDEV_TX_BUSY: 3528 /* qdisc may call dev_hard_start_xmit directly in cases 3529 * where no queues exist e.g. loopback device, virtual 3530 * devices, etc. In this case we need to handle 3531 * NETDEV_TX_ codes. 3532 */ 3533 default: 3534 pkt_dev->errors++; 3535 net_info_ratelimited("%s xmit error: %d\n", 3536 pkt_dev->odevname, ret); 3537 break; 3538 } 3539 goto out; 3540 } 3541 3542 txq = skb_get_tx_queue(odev, pkt_dev->skb); 3543 3544 local_bh_disable(); 3545 3546 HARD_TX_LOCK(odev, txq, smp_processor_id()); 3547 3548 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { 3549 pkt_dev->last_ok = 0; 3550 goto unlock; 3551 } 3552 refcount_add(burst, &pkt_dev->skb->users); 3553 3554 xmit_more: 3555 ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0); 3556 3557 switch (ret) { 3558 case NETDEV_TX_OK: 3559 pkt_dev->last_ok = 1; 3560 pkt_dev->sofar++; 3561 pkt_dev->seq_num++; 3562 pkt_dev->tx_bytes += pkt_dev->last_pkt_size; 3563 if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) 3564 goto xmit_more; 3565 break; 3566 case NET_XMIT_DROP: 3567 case NET_XMIT_CN: 3568 /* skb has been consumed */ 3569 pkt_dev->errors++; 3570 break; 3571 default: /* Drivers are not supposed to return other values! */ 3572 net_info_ratelimited("%s xmit error: %d\n", 3573 pkt_dev->odevname, ret); 3574 pkt_dev->errors++; 3575 fallthrough; 3576 case NETDEV_TX_BUSY: 3577 /* Retry it next time */ 3578 refcount_dec(&(pkt_dev->skb->users)); 3579 pkt_dev->last_ok = 0; 3580 } 3581 if (unlikely(burst)) 3582 WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users)); 3583 unlock: 3584 HARD_TX_UNLOCK(odev, txq); 3585 3586 out: 3587 local_bh_enable(); 3588 3589 /* If pkt_dev->count is zero, then run forever */ 3590 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3591 pktgen_wait_for_skb(pkt_dev); 3592 3593 /* Done with this */ 3594 pktgen_stop_device(pkt_dev); 3595 } 3596 } 3597 3598 /* 3599 * Main loop of the thread goes here 3600 */ 3601 3602 static int pktgen_thread_worker(void *arg) 3603 { 3604 struct pktgen_thread *t = arg; 3605 struct pktgen_dev *pkt_dev = NULL; 3606 int cpu = t->cpu; 3607 3608 WARN_ON(smp_processor_id() != cpu); 3609 3610 init_waitqueue_head(&t->queue); 3611 complete(&t->start_done); 3612 3613 pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); 3614 3615 set_freezable(); 3616 3617 while (!kthread_should_stop()) { 3618 pkt_dev = next_to_run(t); 3619 3620 if (unlikely(!pkt_dev && t->control == 0)) { 3621 if (t->net->pktgen_exiting) 3622 break; 3623 wait_event_interruptible_timeout(t->queue, 3624 t->control != 0, 3625 HZ/10); 3626 try_to_freeze(); 3627 continue; 3628 } 3629 3630 if (likely(pkt_dev)) { 3631 pktgen_xmit(pkt_dev); 3632 3633 if (need_resched()) 3634 pktgen_resched(pkt_dev); 3635 else 3636 cpu_relax(); 3637 } 3638 3639 if (t->control & T_STOP) { 3640 pktgen_stop(t); 3641 t->control &= ~(T_STOP); 3642 } 3643 3644 if (t->control & T_RUN) { 3645 pktgen_run(t); 3646 t->control &= ~(T_RUN); 3647 } 3648 3649 if (t->control & T_REMDEVALL) { 3650 pktgen_rem_all_ifs(t); 3651 t->control &= ~(T_REMDEVALL); 3652 } 3653 3654 if (t->control & T_REMDEV) { 3655 pktgen_rem_one_if(t); 3656 t->control &= ~(T_REMDEV); 3657 } 3658 3659 try_to_freeze(); 3660 } 3661 3662 pr_debug("%s stopping all device\n", t->tsk->comm); 3663 pktgen_stop(t); 3664 3665 pr_debug("%s removing all device\n", t->tsk->comm); 3666 pktgen_rem_all_ifs(t); 3667 3668 pr_debug("%s removing thread\n", t->tsk->comm); 3669 pktgen_rem_thread(t); 3670 3671 return 0; 3672 } 3673 
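/* A note on the control-flag handshake used above: a controlling
 * process sets a T_* bit in t->control under pktgen_thread_lock; the
 * worker loop notices it (re-checking at least every HZ/10 via
 * wait_event_interruptible_timeout()), performs the operation, and
 * clears the bit. Callers such as pktgen_mark_device() and
 * pktgen_run_all_threads() then sleep in ~125 msec steps until the
 * effect becomes visible.
 */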
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname, bool exact)
{
	struct pktgen_dev *p, *pkt_dev = NULL;
	size_t len = strlen(ifname);

	rcu_read_lock();
	list_for_each_entry_rcu(p, &t->if_list, list)
		if (strncmp(p->odevname, ifname, len) == 0) {
			if (p->odevname[len]) {
				if (exact || p->odevname[len] != '@')
					continue;
			}
			pkt_dev = p;
			break;
		}

	rcu_read_unlock();
	pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
	return pkt_dev;
}

/*
 * Adds a dev at front of if_list.
 */

static int add_dev_to_thread(struct pktgen_thread *t,
			     struct pktgen_dev *pkt_dev)
{
	int rv = 0;

	/* This function cannot be called concurrently, as it is called
	 * under the pktgen_thread_lock mutex, but it can run from
	 * userspace on another CPU than the kthread. The if_lock()
	 * is used here to sync with concurrent instances of
	 * _rem_dev_from_if_list() invoked via kthread, which is also
	 * updating the if_list.
	 */
	if_lock(t);

	if (pkt_dev->pg_thread) {
		pr_err("ERROR: already assigned to a thread\n");
		rv = -EBUSY;
		goto out;
	}

	pkt_dev->running = 0;
	pkt_dev->pg_thread = t;
	list_add_rcu(&pkt_dev->list, &t->if_list);

out:
	if_unlock(t);
	return rv;
}

/* Called under thread lock */

static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
{
	struct pktgen_dev *pkt_dev;
	int err;
	int node = cpu_to_node(t->cpu);

	/* We don't allow a device to be on several threads */

	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
	if (pkt_dev) {
		pr_err("ERROR: interface already used\n");
		return -EBUSY;
	}

	pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
	if (!pkt_dev)
		return -ENOMEM;

	strcpy(pkt_dev->odevname, ifname);
	pkt_dev->flows = vzalloc_node(array_size(MAX_CFLOWS,
						 sizeof(struct flow_state)),
				      node);
	if (pkt_dev->flows == NULL) {
		kfree(pkt_dev);
		return -ENOMEM;
	}

	pkt_dev->removal_mark = 0;
	pkt_dev->nfrags = 0;
	pkt_dev->delay = pg_delay_d;
	pkt_dev->count = pg_count_d;
	pkt_dev->sofar = 0;
	pkt_dev->udp_src_min = 9;	/* sink port */
	pkt_dev->udp_src_max = 9;
	pkt_dev->udp_dst_min = 9;
	pkt_dev->udp_dst_max = 9;
	pkt_dev->vlan_p = 0;
	pkt_dev->vlan_cfi = 0;
	pkt_dev->vlan_id = 0xffff;
	pkt_dev->svlan_p = 0;
	pkt_dev->svlan_cfi = 0;
	pkt_dev->svlan_id = 0xffff;
	pkt_dev->burst = 1;
	pkt_dev->node = NUMA_NO_NODE;

	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
	if (err)
		goto out1;
	if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
		pkt_dev->clone_skb = pg_clone_skb_d;

	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
					  &pktgen_if_proc_ops, pkt_dev);
	if (!pkt_dev->entry) {
		pr_err("cannot create %s/%s procfs entry\n",
		       PG_PROC_DIR, ifname);
		err = -EINVAL;
		goto out2;
	}
#ifdef CONFIG_XFRM
	pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
	pkt_dev->ipsproto = IPPROTO_ESP;

	/* xfrm tunnel mode needs an additional dst to extract the outer
	 * IP header protocol/ttl/id fields, so create a phony one here
	 * instead of looking up a valid rt, which would definitely hurt
	 * performance in such a circumstance.
3797 */ 3798 pkt_dev->dstops.family = AF_INET; 3799 pkt_dev->xdst.u.dst.dev = pkt_dev->odev; 3800 dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false); 3801 pkt_dev->xdst.child = &pkt_dev->xdst.u.dst; 3802 pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops; 3803 #endif 3804 3805 return add_dev_to_thread(t, pkt_dev); 3806 out2: 3807 netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker); 3808 out1: 3809 #ifdef CONFIG_XFRM 3810 free_SAs(pkt_dev); 3811 #endif 3812 vfree(pkt_dev->flows); 3813 kfree(pkt_dev); 3814 return err; 3815 } 3816 3817 static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) 3818 { 3819 struct pktgen_thread *t; 3820 struct proc_dir_entry *pe; 3821 struct task_struct *p; 3822 3823 t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL, 3824 cpu_to_node(cpu)); 3825 if (!t) { 3826 pr_err("ERROR: out of memory, can't create new thread\n"); 3827 return -ENOMEM; 3828 } 3829 3830 mutex_init(&t->if_lock); 3831 t->cpu = cpu; 3832 3833 INIT_LIST_HEAD(&t->if_list); 3834 3835 list_add_tail(&t->th_list, &pn->pktgen_threads); 3836 init_completion(&t->start_done); 3837 3838 p = kthread_create_on_node(pktgen_thread_worker, 3839 t, 3840 cpu_to_node(cpu), 3841 "kpktgend_%d", cpu); 3842 if (IS_ERR(p)) { 3843 pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu); 3844 list_del(&t->th_list); 3845 kfree(t); 3846 return PTR_ERR(p); 3847 } 3848 kthread_bind(p, cpu); 3849 t->tsk = p; 3850 3851 pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir, 3852 &pktgen_thread_proc_ops, t); 3853 if (!pe) { 3854 pr_err("cannot create %s/%s procfs entry\n", 3855 PG_PROC_DIR, t->tsk->comm); 3856 kthread_stop(p); 3857 list_del(&t->th_list); 3858 kfree(t); 3859 return -EINVAL; 3860 } 3861 3862 t->net = pn; 3863 get_task_struct(p); 3864 wake_up_process(p); 3865 wait_for_completion(&t->start_done); 3866 3867 return 0; 3868 } 3869 3870 /* 3871 * Removes a device from the thread if_list. 
3872 */ 3873 static void _rem_dev_from_if_list(struct pktgen_thread *t, 3874 struct pktgen_dev *pkt_dev) 3875 { 3876 struct list_head *q, *n; 3877 struct pktgen_dev *p; 3878 3879 if_lock(t); 3880 list_for_each_safe(q, n, &t->if_list) { 3881 p = list_entry(q, struct pktgen_dev, list); 3882 if (p == pkt_dev) 3883 list_del_rcu(&p->list); 3884 } 3885 if_unlock(t); 3886 } 3887 3888 static int pktgen_remove_device(struct pktgen_thread *t, 3889 struct pktgen_dev *pkt_dev) 3890 { 3891 pr_debug("remove_device pkt_dev=%p\n", pkt_dev); 3892 3893 if (pkt_dev->running) { 3894 pr_warn("WARNING: trying to remove a running interface, stopping it now\n"); 3895 pktgen_stop_device(pkt_dev); 3896 } 3897 3898 /* Dis-associate from the interface */ 3899 3900 if (pkt_dev->odev) { 3901 netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker); 3902 pkt_dev->odev = NULL; 3903 } 3904 3905 /* Remove proc before if_list entry, because add_device uses 3906 * list to determine if interface already exist, avoid race 3907 * with proc_create_data() */ 3908 proc_remove(pkt_dev->entry); 3909 3910 /* And update the thread if_list */ 3911 _rem_dev_from_if_list(t, pkt_dev); 3912 3913 #ifdef CONFIG_XFRM 3914 free_SAs(pkt_dev); 3915 #endif 3916 vfree(pkt_dev->flows); 3917 if (pkt_dev->page) 3918 put_page(pkt_dev->page); 3919 kfree_rcu(pkt_dev, rcu); 3920 return 0; 3921 } 3922 3923 static int __net_init pg_net_init(struct net *net) 3924 { 3925 struct pktgen_net *pn = net_generic(net, pg_net_id); 3926 struct proc_dir_entry *pe; 3927 int cpu, ret = 0; 3928 3929 pn->net = net; 3930 INIT_LIST_HEAD(&pn->pktgen_threads); 3931 pn->pktgen_exiting = false; 3932 pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net); 3933 if (!pn->proc_dir) { 3934 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR); 3935 return -ENODEV; 3936 } 3937 pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_proc_ops); 3938 if (pe == NULL) { 3939 pr_err("cannot create %s procfs entry\n", PGCTRL); 3940 ret = -EINVAL; 3941 goto remove; 3942 } 3943 3944 for_each_online_cpu(cpu) { 3945 int err; 3946 3947 err = pktgen_create_thread(cpu, pn); 3948 if (err) 3949 pr_warn("Cannot create thread for cpu %d (%d)\n", 3950 cpu, err); 3951 } 3952 3953 if (list_empty(&pn->pktgen_threads)) { 3954 pr_err("Initialization failed for all threads\n"); 3955 ret = -ENODEV; 3956 goto remove_entry; 3957 } 3958 3959 return 0; 3960 3961 remove_entry: 3962 remove_proc_entry(PGCTRL, pn->proc_dir); 3963 remove: 3964 remove_proc_entry(PG_PROC_DIR, pn->net->proc_net); 3965 return ret; 3966 } 3967 3968 static void __net_exit pg_net_exit(struct net *net) 3969 { 3970 struct pktgen_net *pn = net_generic(net, pg_net_id); 3971 struct pktgen_thread *t; 3972 struct list_head *q, *n; 3973 LIST_HEAD(list); 3974 3975 /* Stop all interfaces & threads */ 3976 pn->pktgen_exiting = true; 3977 3978 mutex_lock(&pktgen_thread_lock); 3979 list_splice_init(&pn->pktgen_threads, &list); 3980 mutex_unlock(&pktgen_thread_lock); 3981 3982 list_for_each_safe(q, n, &list) { 3983 t = list_entry(q, struct pktgen_thread, th_list); 3984 list_del(&t->th_list); 3985 kthread_stop(t->tsk); 3986 put_task_struct(t->tsk); 3987 kfree(t); 3988 } 3989 3990 remove_proc_entry(PGCTRL, pn->proc_dir); 3991 remove_proc_entry(PG_PROC_DIR, pn->net->proc_net); 3992 } 3993 3994 static struct pernet_operations pg_net_ops = { 3995 .init = pg_net_init, 3996 .exit = pg_net_exit, 3997 .id = &pg_net_id, 3998 .size = sizeof(struct pktgen_net), 3999 }; 4000 4001 static int __init pg_init(void) 4002 { 4003 int ret = 0; 4004 4005 pr_info("%s", version); 4006 
ret = register_pernet_subsys(&pg_net_ops); 4007 if (ret) 4008 return ret; 4009 ret = register_netdevice_notifier(&pktgen_notifier_block); 4010 if (ret) 4011 unregister_pernet_subsys(&pg_net_ops); 4012 4013 return ret; 4014 } 4015 4016 static void __exit pg_cleanup(void) 4017 { 4018 unregister_netdevice_notifier(&pktgen_notifier_block); 4019 unregister_pernet_subsys(&pg_net_ops); 4020 /* Don't need rcu_barrier() due to use of kfree_rcu() */ 4021 } 4022 4023 module_init(pg_init); 4024 module_exit(pg_cleanup); 4025 4026 MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>"); 4027 MODULE_DESCRIPTION("Packet Generator tool"); 4028 MODULE_LICENSE("GPL"); 4029 MODULE_VERSION(VERSION); 4030 module_param(pg_count_d, int, 0); 4031 MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject"); 4032 module_param(pg_delay_d, int, 0); 4033 MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)"); 4034 module_param(pg_clone_skb_d, int, 0); 4035 MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet"); 4036 module_param(debug, int, 0); 4037 MODULE_PARM_DESC(debug, "Enable debugging of pktgen module"); 4038
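
/* A minimal usage sketch of the /proc interface wired up above. The
 * thread file name comes from "kpktgend_%d" and the control file from
 * PGCTRL; "count" and "dst" are device keywords parsed by
 * pktgen_if_write() in the earlier part of this file, and eth0 and the
 * addresses below are illustrative only:
 *
 *	modprobe pktgen
 *	echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
 *	echo "count 10000"     > /proc/net/pktgen/eth0
 *	echo "dst 10.0.0.2"    > /proc/net/pktgen/eth0
 *	echo "start"           > /proc/net/pktgen/pgctrl
 *
 * Read /proc/net/pktgen/eth0 afterwards for the OK:/ERROR: result line
 * produced by show_results().
 */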