// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Authors:
 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
 *                             Uppsala University and
 *                             Swedish University of Agricultural Sciences
 *
 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
 * Ben Greear <greearb@candelatech.com>
 * Jens Låås <jens.laas@data.slu.se>
 *
 * A tool for loading the network with preconfigured packets.
 * The tool is implemented as a linux module.  Parameters are output
 * device, delay (to hard_xmit), number of packets, and whether
 * to use multiple SKBs or just the same one.
 * pktgen uses the installed interface's output routine.
 *
 * Additional hacking by:
 *
 * Jens.Laas@data.slu.se
 * Improved by ANK. 010120.
 * Improved by ANK even more. 010212.
 * MAC address typo fixed. 010417 --ro
 * Integrated.  020301 --DaveM
 * Added multiskb option 020301 --DaveM
 * Scaling of results. 020417--sigurdur@linpro.no
 * Significant re-work of the module:
 *   *  Convert to threaded model to more efficiently be able to transmit
 *      and receive on multiple interfaces at once.
 *   *  Converted many counters to __u64 to allow longer runs.
 *   *  Allow configuration of ranges, like min/max IP address, MACs,
 *      and UDP-ports, for both source and destination, and can
 *      set to use a random distribution or sequentially walk the range.
 *   *  Can now change most values after starting.
 *   *  Place 12-byte packet in UDP payload with magic number,
 *      sequence number, and timestamp.
 *   *  Add receiver code that detects dropped pkts, re-ordered pkts, and
 *      latencies (with micro-second precision).
 *   *  Add IOCTL interface to easily get counters & configuration.
 *   --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
 * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0
 * as a "fastpath" with a configurable number of clones after alloc's.
 * clone_skb=0 means all packets are allocated; this also means ranges, time
 * stamps etc can be used. clone_skb=100 means 1 malloc is followed by 100
 * clones.
 *
 * Also moved to /proc/net/pktgen/
 * --ro
 *
 * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
 * mistakes.  Also merged in DaveM's patch in the -pre6 patch.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.rst for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start. This process checks
 * for running devices in the if_list and sends packets until count is 0.
 * The thread also checks thread->control, which is used for inter-process
 * communication; the controlling process "posts" operations to the threads
 * this way.
 * The if_list is RCU protected, and the if_lock remains to protect updating
 * of if_list, from "add_device" as it is invoked from userspace (via proc
 * write).
 *
 * By design there should only be *one* "controlling" process. In practice
 * multiple write accesses give unpredictable results. Each "write" to /proc
 * yields a result code that should be read back by the "writer".
 * For practical use this should be no problem.
 *
 * Note: when adding devices to a specific CPU, it is a good idea to also
 * assign /proc/irq/XX/smp_affinity so TX interrupts get bound to the same
 * CPU.
 * --ro
 *
 * Fix refcount off by one if first packet fails, potential null deref,
 * memleak 030710- KJP
 *
 * First "ranges" functionality for ipv6 030726 --ro
 *
 * Included flow support. 030802 ANK.
 *
 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
 *
 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
 *
 * New xmit() return, do_div and misc clean up by Stephen Hemminger
 * <shemminger@osdl.org> 040923
 *
 * Randy Dunlap fixed u64 printk compiler warning
 *
 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
 *
 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 *
 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan
 * <nacc@us.ibm.com> 050103
 *
 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 *
 * 802.1Q/Q-in-Q support by Francesco Fondelli (FF) <francesco.fondelli@gmail.com>
 *
 * Fixed src_mac command to set source mac of packet to value specified in
 * command by Adit Ranadive <adit.262@gmail.com>
 */
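/*
 * Illustrative shell usage (see Documentation/networking/pktgen.rst for the
 * full command set; the interface name "eth0" and thread number 0 are
 * assumptions for this example, not a requirement of the module):
 *
 *   modprobe pktgen
 *   echo "add_device eth0" > /proc/net/pktgen/kpktgend_0
 *   echo "count 100000"    > /proc/net/pktgen/eth0
 *   echo "pkt_size 300"    > /proc/net/pktgen/eth0
 *   echo "dst 10.0.0.2"    > /proc/net/pktgen/eth0
 *   echo "start"           > /proc/net/pgctrl
 */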

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sys.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/wait.h>
#include <linux/etherdevice.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/mmzone.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/netns/generic.h>
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <asm/dma.h>
#include <asm/div64.h>		/* do_div */

#define VERSION	"2.75"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
/* Max number of internet mix entries that can be specified in imix_weights. */
#define MAX_IMIX_ENTRIES 20
#define IMIX_PRECISION 100 /* Precision of IMIX distribution */

#define func_enter() pr_debug("entering %s\n", __func__)

#define PKT_FLAGS							\
	pf(IPV6)		/* Interface in IPV6 Mode */		\
	pf(IPSRC_RND)		/* IP-Src Random */			\
	pf(IPDST_RND)		/* IP-Dst Random */			\
	pf(TXSIZE_RND)		/* Transmit size is random */		\
	pf(UDPSRC_RND)		/* UDP-Src Random */			\
	pf(UDPDST_RND)		/* UDP-Dst Random */			\
	pf(UDPCSUM)		/* Include UDP checksum */		\
	pf(NO_TIMESTAMP)	/* Don't timestamp packets (default TS) */ \
	pf(MPLS_RND)		/* Random MPLS labels */		\
	pf(QUEUE_MAP_RND)	/* queue map Random */			\
	pf(QUEUE_MAP_CPU)	/* queue map mirrors smp_processor_id() */ \
	pf(FLOW_SEQ)		/* Sequential flows */			\
	pf(IPSEC)		/* ipsec on for flows */		\
	pf(MACSRC_RND)		/* MAC-Src Random */			\
	pf(MACDST_RND)		/* MAC-Dst Random */			\
	pf(VID_RND)		/* Random VLAN ID */			\
	pf(SVID_RND)		/* Random SVLAN ID */			\
	pf(NODE)		/* Node memory alloc */			\
	pf(SHARED)		/* Shared SKB */			\

#define pf(flag)		flag##_SHIFT,
enum pkt_flags {
	PKT_FLAGS
};
#undef pf

/* Device flag bits */
#define pf(flag)		static const __u32 F_##flag = (1<<flag##_SHIFT);
PKT_FLAGS
#undef pf

#define pf(flag)		__stringify(flag),
static char *pkt_flag_names[] = {
	PKT_FLAGS
};
#undef pf

#define NR_PKT_FLAGS		ARRAY_SIZE(pkt_flag_names)
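/*
 * The pf() X-macro above expands PKT_FLAGS three times, so the shift
 * enumerators, the F_* bit constants and the flag name strings stay in sync
 * by construction: for example, F_IPV6 == (1 << IPV6_SHIFT) and
 * pkt_flag_names[IPV6_SHIFT] == "IPV6".
 */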

/* Thread control flag bits */
#define T_STOP		(1<<0)	/* Stop run */
#define T_RUN		(1<<1)	/* Start run */
#define T_REMDEVALL	(1<<2)	/* Remove all devs */
#define T_REMDEV	(1<<3)	/* Remove one dev */

/* Xmit modes */
#define M_START_XMIT		0	/* Default normal TX */
#define M_NETIF_RECEIVE		1	/* Inject packets into stack */
#define M_QUEUE_XMIT		2	/* Inject packet into qdisc */

/* If lock -- protects updating of if_list */
#define   if_lock(t)           mutex_lock(&(t->if_lock))
#define   if_unlock(t)         mutex_unlock(&(t->if_lock))

/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
#define PG_PROC_DIR "pktgen"
#define PGCTRL	    "pgctrl"

#define MAX_CFLOWS  65536

#define VLAN_TAG_SIZE(x) ((x)->vlan_id == 0xffff ? 0 : 4)
#define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4)

struct imix_pkt {
	u64 size;
	u64 weight;
	u64 count_so_far;
};

struct flow_state {
	__be32 cur_daddr;
	int count;
#ifdef CONFIG_XFRM
	struct xfrm_state *x;
#endif
	__u32 flags;
};

/* flow flag bits */
#define F_INIT   (1<<0)		/* flow has been initialized */

struct pktgen_dev {
	/*
	 * Try to keep frequent/infrequent used vars. separated.
	 */
	struct proc_dir_entry *entry;	/* proc file */
	struct pktgen_thread *pg_thread;/* the owner */
	struct list_head list;		/* chaining in the thread's run-queue */
	struct rcu_head	 rcu;		/* freed by RCU */

	int running;		/* if false, the test will stop */

	/* If min != max, then we will either do a linear iteration, or
	 * we will do a random selection from within the range.
	 */
	__u32 flags;
	int xmit_mode;
	int min_pkt_size;
	int max_pkt_size;
	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
	int nfrags;
	int removal_mark;	/* non-zero => the device is marked for
				 * removal by worker thread
				 */

	struct page *page;
	u64 delay;		/* nano-seconds */

	__u64 count;		/* Default No packets to send */
	__u64 sofar;		/* How many pkts we've sent so far */
	__u64 tx_bytes;		/* How many bytes we've transmitted */
	__u64 errors;		/* Errors when trying to transmit, */

	/* runtime counters relating to clone_skb */

	__u32 clone_count;
	int last_ok;		/* Was last skb sent?
				 * Or a failed transmit of some sort?
				 * This will keep sequence numbers in order
				 */
	ktime_t next_tx;
	ktime_t started_at;
	ktime_t stopped_at;
	u64	idle_acc;	/* nano-seconds */

	__u32 seq_num;

	int clone_skb;		/*
				 * Use multiple SKBs during packet gen.
				 * If this number is greater than 1, then
				 * that many copies of the same packet will be
				 * sent before a new packet is allocated.
				 * If you want to send 1024 identical packets
				 * before creating a new packet,
				 * set clone_skb to 1024.
				 */

	char dst_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char dst_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */

	struct in6_addr in6_saddr;
	struct in6_addr in6_daddr;
	struct in6_addr cur_in6_daddr;
	struct in6_addr cur_in6_saddr;
	/* For ranges */
	struct in6_addr min_in6_daddr;
	struct in6_addr max_in6_daddr;
	struct in6_addr min_in6_saddr;
	struct in6_addr max_in6_saddr;

	/* If we're doing ranges, random or incremental, then this
	 * defines the min/max for those ranges.
	 */
	__be32 saddr_min;	/* inclusive, source IP address */
	__be32 saddr_max;	/* exclusive, source IP address */
	__be32 daddr_min;	/* inclusive, dest IP address */
	__be32 daddr_max;	/* exclusive, dest IP address */

	__u16 udp_src_min;	/* inclusive, source UDP port */
	__u16 udp_src_max;	/* exclusive, source UDP port */
	__u16 udp_dst_min;	/* inclusive, dest UDP port */
	__u16 udp_dst_max;	/* exclusive, dest UDP port */

	/* DSCP + ECN */
	__u8 tos;		/* six MSB of (former) IPv4 TOS
				 * are for dscp codepoint
				 */
	__u8 traffic_class;	/* ditto for the (former) Traffic Class in IPv6
				 * (see RFC 3260, sec. 4)
				 */

	/* IMIX */
	unsigned int n_imix_entries;
	struct imix_pkt imix_entries[MAX_IMIX_ENTRIES];
	/* Maps 0-IMIX_PRECISION range to imix_entry based on probability */
	__u8 imix_distribution[IMIX_PRECISION];

	/* MPLS */
	unsigned int nr_labels;	/* Depth of stack, 0 = no MPLS */
	__be32 labels[MAX_MPLS_LABELS];

	/* VLAN/SVLAN (802.1Q/Q-in-Q) */
	__u8  vlan_p;
	__u8  vlan_cfi;
	__u16 vlan_id;  /* 0xffff means no vlan tag */

	__u8  svlan_p;
	__u8  svlan_cfi;
	__u16 svlan_id; /* 0xffff means no svlan tag */

	__u32 src_mac_count;	/* How many MACs to iterate through */
	__u32 dst_mac_count;	/* How many MACs to iterate through */

	unsigned char dst_mac[ETH_ALEN];
	unsigned char src_mac[ETH_ALEN];

	__u32 cur_dst_mac_offset;
	__u32 cur_src_mac_offset;
	__be32 cur_saddr;
	__be32 cur_daddr;
	__u16 ip_id;
	__u16 cur_udp_dst;
	__u16 cur_udp_src;
	__u16 cur_queue_map;
	__u32 cur_pkt_size;
	__u32 last_pkt_size;

	__u8 hh[14];
	/* = {
	 *	0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,
	 *
	 *	We fill in SRC address later
	 *	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	 *	0x08, 0x00
	 * };
	 */
	__u16 pad;		/* pad out the hh struct to an even 16 bytes */

	struct sk_buff *skb;	/* skb we are to transmit next, used for when we
				 * are transmitting the same one multiple times
				 */
	struct net_device *odev; /* The out-going device.
				  * Note that the device should have its
				  * pg_info pointer pointing back to this
				  * device.
				  * Set when the user specifies the out-going
				  * device name (not when the inject is
				  * started as it used to do.)
				  */
	netdevice_tracker dev_tracker;
	char odevname[32];
	struct flow_state *flows;
	unsigned int cflows;	/* Concurrent flows (config) */
	unsigned int lflow;	/* Flow length  (config) */
	unsigned int nflows;	/* accumulated flows (stats) */
	unsigned int curfl;	/* current sequenced flow (state) */

	u16 queue_map_min;
	u16 queue_map_max;
	__u32 skb_priority;	/* skb priority field */
	unsigned int burst;	/* number of duplicated packets to burst */
	int node;		/* Memory node */

#ifdef CONFIG_XFRM
	__u8	ipsmode;	/* IPSEC mode (config) */
	__u8	ipsproto;	/* IPSEC type (config) */
	__u32	spi;
	struct xfrm_dst xdst;
	struct dst_ops dstops;
#endif
	char result[512];
};

struct pktgen_hdr {
	__be32 pgh_magic;
	__be32 seq_num;
	__be32 tv_sec;
	__be32 tv_usec;
};


static unsigned int pg_net_id __read_mostly;

struct pktgen_net {
	struct net		*net;
	struct proc_dir_entry	*proc_dir;
	struct list_head	pktgen_threads;
	bool			pktgen_exiting;
};

struct pktgen_thread {
	struct mutex if_lock;		/* for list of devices */
	struct list_head if_list;	/* All device here */
	struct list_head th_list;
	struct task_struct *tsk;
	char result[512];

	/* Field for thread to receive "posted" events terminate,
	 * stop ifs etc.
	 */

	u32  control;
	int  cpu;

	wait_queue_head_t queue;
	struct completion start_done;
	struct pktgen_net *net;
};

#define REMOVE 1
#define FIND   0

static const char version[] =
	"Packet Generator for packet performance testing. Version: " VERSION "\n";
Version: " VERSION "\n"; 480 481 static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 482 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 483 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 484 const char *ifname, bool exact); 485 static int pktgen_device_event(struct notifier_block *, unsigned long, void *); 486 static void pktgen_run_all_threads(struct pktgen_net *pn); 487 static void pktgen_reset_all_threads(struct pktgen_net *pn); 488 static void pktgen_stop_all_threads(struct pktgen_net *pn); 489 490 static void pktgen_stop(struct pktgen_thread *t); 491 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 492 static void fill_imix_distribution(struct pktgen_dev *pkt_dev); 493 494 /* Module parameters, defaults. */ 495 static int pg_count_d __read_mostly = 1000; 496 static int pg_delay_d __read_mostly; 497 static int pg_clone_skb_d __read_mostly; 498 static int debug __read_mostly; 499 500 static DEFINE_MUTEX(pktgen_thread_lock); 501 502 static struct notifier_block pktgen_notifier_block = { 503 .notifier_call = pktgen_device_event, 504 }; 505 506 /* 507 * /proc handling functions 508 * 509 */ 510 511 static int pgctrl_show(struct seq_file *seq, void *v) 512 { 513 seq_puts(seq, version); 514 return 0; 515 } 516 517 static ssize_t pgctrl_write(struct file *file, const char __user *buf, 518 size_t count, loff_t *ppos) 519 { 520 char data[128]; 521 size_t max; 522 struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id); 523 524 if (!capable(CAP_NET_ADMIN)) 525 return -EPERM; 526 527 if (count < 1) 528 return -EINVAL; 529 530 max = min(count, sizeof(data) - 1); 531 if (copy_from_user(data, buf, max)) 532 return -EFAULT; 533 534 if (data[max - 1] == '\n') 535 data[max - 1] = 0; /* strip trailing '\n', terminate string */ 536 else 537 data[max] = 0; /* terminate string */ 538 539 if (!strcmp(data, "stop")) 540 pktgen_stop_all_threads(pn); 541 else if (!strcmp(data, "start")) 542 pktgen_run_all_threads(pn); 543 else if (!strcmp(data, "reset")) 544 pktgen_reset_all_threads(pn); 545 else 546 return -EINVAL; 547 548 return count; 549 } 550 551 static int pgctrl_open(struct inode *inode, struct file *file) 552 { 553 return single_open(file, pgctrl_show, pde_data(inode)); 554 } 555 556 static const struct proc_ops pktgen_proc_ops = { 557 .proc_open = pgctrl_open, 558 .proc_read = seq_read, 559 .proc_lseek = seq_lseek, 560 .proc_write = pgctrl_write, 561 .proc_release = single_release, 562 }; 563 564 static int pktgen_if_show(struct seq_file *seq, void *v) 565 { 566 const struct pktgen_dev *pkt_dev = seq->private; 567 ktime_t stopped; 568 unsigned int i; 569 u64 idle; 570 571 seq_printf(seq, 572 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 573 (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size, 574 pkt_dev->max_pkt_size); 575 576 if (pkt_dev->n_imix_entries > 0) { 577 seq_puts(seq, " imix_weights: "); 578 for (i = 0; i < pkt_dev->n_imix_entries; i++) { 579 seq_printf(seq, "%llu,%llu ", 580 pkt_dev->imix_entries[i].size, 581 pkt_dev->imix_entries[i].weight); 582 } 583 seq_puts(seq, "\n"); 584 } 585 586 seq_printf(seq, 587 " frags: %d delay: %llu clone_skb: %d ifname: %s\n", 588 pkt_dev->nfrags, (unsigned long long) pkt_dev->delay, 589 pkt_dev->clone_skb, pkt_dev->odevname); 590 591 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 592 pkt_dev->lflow); 593 594 seq_printf(seq, 595 " queue_map_min: %u queue_map_max: %u\n", 596 pkt_dev->queue_map_min, 597 

	if (pkt_dev->skb_priority)
		seq_printf(seq, "     skb_priority: %u\n",
			   pkt_dev->skb_priority);

	if (pkt_dev->flags & F_IPV6) {
		seq_printf(seq,
			   "     saddr: %pI6c  min_saddr: %pI6c  max_saddr: %pI6c\n"
			   "     daddr: %pI6c  min_daddr: %pI6c  max_daddr: %pI6c\n",
			   &pkt_dev->in6_saddr,
			   &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
			   &pkt_dev->in6_daddr,
			   &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
	} else {
		seq_printf(seq,
			   "     dst_min: %s  dst_max: %s\n",
			   pkt_dev->dst_min, pkt_dev->dst_max);
		seq_printf(seq,
			   "     src_min: %s  src_max: %s\n",
			   pkt_dev->src_min, pkt_dev->src_max);
	}

	seq_puts(seq, "     src_mac: ");

	seq_printf(seq, "%pM ",
		   is_zero_ether_addr(pkt_dev->src_mac) ?
			     pkt_dev->odev->dev_addr : pkt_dev->src_mac);

	seq_puts(seq, "dst_mac: ");
	seq_printf(seq, "%pM\n", pkt_dev->dst_mac);

	seq_printf(seq,
		   "     udp_src_min: %d  udp_src_max: %d  udp_dst_min: %d  udp_dst_max: %d\n",
		   pkt_dev->udp_src_min, pkt_dev->udp_src_max,
		   pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);

	seq_printf(seq,
		   "     src_mac_count: %d  dst_mac_count: %d\n",
		   pkt_dev->src_mac_count, pkt_dev->dst_mac_count);

	if (pkt_dev->nr_labels) {
		seq_puts(seq, "     mpls: ");
		for (i = 0; i < pkt_dev->nr_labels; i++)
			seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
				   i == pkt_dev->nr_labels-1 ? "\n" : ", ");
	}

	if (pkt_dev->vlan_id != 0xffff)
		seq_printf(seq, "     vlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
			   pkt_dev->vlan_id, pkt_dev->vlan_p,
			   pkt_dev->vlan_cfi);

	if (pkt_dev->svlan_id != 0xffff)
		seq_printf(seq, "     svlan_id: %u  vlan_p: %u  vlan_cfi: %u\n",
			   pkt_dev->svlan_id, pkt_dev->svlan_p,
			   pkt_dev->svlan_cfi);

	if (pkt_dev->tos)
		seq_printf(seq, "     tos: 0x%02x\n", pkt_dev->tos);

	if (pkt_dev->traffic_class)
		seq_printf(seq, "     traffic_class: 0x%02x\n", pkt_dev->traffic_class);

	if (pkt_dev->burst > 1)
		seq_printf(seq, "     burst: %d\n", pkt_dev->burst);

	if (pkt_dev->node >= 0)
		seq_printf(seq, "     node: %d\n", pkt_dev->node);

	if (pkt_dev->xmit_mode == M_NETIF_RECEIVE)
		seq_puts(seq, "     xmit_mode: netif_receive\n");
	else if (pkt_dev->xmit_mode == M_QUEUE_XMIT)
		seq_puts(seq, "     xmit_mode: xmit_queue\n");

	seq_puts(seq, "     Flags: ");

	for (i = 0; i < NR_PKT_FLAGS; i++) {
		if (i == FLOW_SEQ_SHIFT)
			if (!pkt_dev->cflows)
				continue;

		if (pkt_dev->flags & (1 << i)) {
			seq_printf(seq, "%s ", pkt_flag_names[i]);
#ifdef CONFIG_XFRM
			if (i == IPSEC_SHIFT && pkt_dev->spi)
				seq_printf(seq, "spi:%u ", pkt_dev->spi);
#endif
		} else if (i == FLOW_SEQ_SHIFT) {
			seq_puts(seq, "FLOW_RND ");
		}
	}

	seq_puts(seq, "\n");

	/* not really stopped, more like last-running-at */
	stopped = pkt_dev->running ? ktime_get() : pkt_dev->stopped_at;
	idle = pkt_dev->idle_acc;
	do_div(idle, NSEC_PER_USEC);

	seq_printf(seq,
		   "Current:\n     pkts-sofar: %llu  errors: %llu\n",
		   (unsigned long long)pkt_dev->sofar,
		   (unsigned long long)pkt_dev->errors);

	if (pkt_dev->n_imix_entries > 0) {
		int i;

		seq_puts(seq, "     imix_size_counts: ");
		for (i = 0; i < pkt_dev->n_imix_entries; i++) {
			seq_printf(seq, "%llu,%llu ",
				   pkt_dev->imix_entries[i].size,
				   pkt_dev->imix_entries[i].count_so_far);
		}
		seq_puts(seq, "\n");
	}

	seq_printf(seq,
		   "     started: %lluus  stopped: %lluus idle: %lluus\n",
		   (unsigned long long) ktime_to_us(pkt_dev->started_at),
		   (unsigned long long) ktime_to_us(stopped),
		   (unsigned long long) idle);

	seq_printf(seq,
		   "     seq_num: %d  cur_dst_mac_offset: %d  cur_src_mac_offset: %d\n",
		   pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
		   pkt_dev->cur_src_mac_offset);

	if (pkt_dev->flags & F_IPV6) {
		seq_printf(seq, "     cur_saddr: %pI6c  cur_daddr: %pI6c\n",
			   &pkt_dev->cur_in6_saddr,
			   &pkt_dev->cur_in6_daddr);
	} else
		seq_printf(seq, "     cur_saddr: %pI4  cur_daddr: %pI4\n",
			   &pkt_dev->cur_saddr, &pkt_dev->cur_daddr);

	seq_printf(seq, "     cur_udp_dst: %d  cur_udp_src: %d\n",
		   pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);

	seq_printf(seq, "     cur_queue_map: %u\n", pkt_dev->cur_queue_map);

	seq_printf(seq, "     flows: %u\n", pkt_dev->nflows);

	if (pkt_dev->result[0])
		seq_printf(seq, "Result: %s\n", pkt_dev->result);
	else
		seq_puts(seq, "Result: Idle\n");

	return 0;
}


static ssize_t hex32_arg(const char __user *user_buffer, size_t maxlen,
			 __u32 *num)
{
	size_t i = 0;

	*num = 0;

	for (; i < maxlen; i++) {
		int value;
		char c;

		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		value = hex_to_bin(c);
		if (value >= 0) {
			*num <<= 4;
			*num |= value;
		} else {
			break;
		}
	}
	return i;
}

static ssize_t count_trail_chars(const char __user *user_buffer, size_t maxlen)
{
	size_t i;

	for (i = 0; i < maxlen; i++) {
		char c;

		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		switch (c) {
		case '\"':
		case '\n':
		case '\r':
		case '\t':
		case ' ':
		case '=':
			break;
		default:
			goto done;
		}
	}
done:
	return i;
}

static ssize_t num_arg(const char __user *user_buffer, size_t maxlen,
		       unsigned long *num)
{
	size_t i;
	*num = 0;

	for (i = 0; i < maxlen; i++) {
		char c;

		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		if ((c >= '0') && (c <= '9')) {
			*num *= 10;
			*num += c - '0';
		} else
			break;
	}
	return i;
}

static ssize_t strn_len(const char __user *user_buffer, size_t maxlen)
{
	size_t i;

	for (i = 0; i < maxlen; i++) {
		char c;

		if (get_user(c, &user_buffer[i]))
			return -EFAULT;
		switch (c) {
		case '\"':
		case '\n':
		case '\r':
		case '\t':
		case ' ':
		case '=':
			goto done_str;
		default:
			break;
		}
	}
done_str:
	return i;
}

/* Parses imix entries from user buffer.
 * The user buffer should consist of imix entries separated by spaces
 * where each entry consists of size and weight delimited by commas.
 * "size_1,weight_1 size_2,weight_2 ... size_n,weight_n" for example.
 */
static ssize_t get_imix_entries(const char __user *buffer,
				size_t maxlen,
				struct pktgen_dev *pkt_dev)
{
	size_t i = 0, max;
	ssize_t len;
	char c;

	pkt_dev->n_imix_entries = 0;

	do {
		unsigned long weight;
		unsigned long size;

		if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
			return -E2BIG;

		if (i >= maxlen)
			return -EINVAL;

		max = min(10, maxlen - i);
		len = num_arg(&buffer[i], max, &size);
		if (len < 0)
			return len;
		i += len;
		if (i >= maxlen)
			return -EINVAL;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		/* Check for comma between size_i and weight_i */
		if (c != ',')
			return -EINVAL;
		i++;
		if (i >= maxlen)
			return -EINVAL;

		if (size < 14 + 20 + 8)
			size = 14 + 20 + 8;

		max = min(10, maxlen - i);
		len = num_arg(&buffer[i], max, &weight);
		if (len < 0)
			return len;
		if (weight <= 0)
			return -EINVAL;

		pkt_dev->imix_entries[pkt_dev->n_imix_entries].size = size;
		pkt_dev->imix_entries[pkt_dev->n_imix_entries].weight = weight;

		i += len;
		pkt_dev->n_imix_entries++;

		if (i >= maxlen)
			break;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		i++;
	} while (c == ' ');

	return i;
}
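/*
 * Illustrative configuration (the values are examples only): a 7:4:1 mix of
 * 40-, 576- and 1500-byte packets:
 *
 *   echo "imix_weights 40,7 576,4 1500,1" > /proc/net/pktgen/eth0
 *
 * Sizes below the 42-byte minimum (14 + 20 + 8, i.e. Ethernet + IPv4 + UDP
 * headers) are rounded up by the parser above.
 */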

static ssize_t get_labels(const char __user *buffer,
			  size_t maxlen, struct pktgen_dev *pkt_dev)
{
	unsigned int n = 0;
	size_t i = 0, max;
	ssize_t len;
	char c;

	pkt_dev->nr_labels = 0;
	do {
		__u32 tmp;

		if (n >= MAX_MPLS_LABELS)
			return -E2BIG;

		if (i >= maxlen)
			return -EINVAL;

		max = min(8, maxlen - i);
		len = hex32_arg(&buffer[i], max, &tmp);
		if (len < 0)
			return len;

		/* return empty list in case of invalid input or zero value */
		if (len == 0 || tmp == 0)
			return maxlen;

		pkt_dev->labels[n] = htonl(tmp);
		if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM)
			pkt_dev->flags |= F_MPLS_RND;
		i += len;
		n++;
		if (i >= maxlen)
			break;
		if (get_user(c, &buffer[i]))
			return -EFAULT;
		i++;
	} while (c == ',');

	pkt_dev->nr_labels = n;
	return i;
}
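/*
 * Illustrative MPLS configuration (the label values are examples only):
 * push a three-label stack, outermost first, as comma-separated hex words:
 *
 *   echo "mpls 0001000a,0002000a,0000000a" > /proc/net/pktgen/eth0
 *
 * Writing a zero or invalid value (e.g. "mpls 0") clears the stack again,
 * as get_labels() above returns an empty list in that case.
 */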

static __u32 pktgen_read_flag(const char *f, bool *disable)
{
	__u32 i;

	if (f[0] == '!') {
		*disable = true;
		f++;
	}

	for (i = 0; i < NR_PKT_FLAGS; i++) {
		if (!IS_ENABLED(CONFIG_XFRM) && i == IPSEC_SHIFT)
			continue;

		/* allow only disabling ipv6 flag */
		if (!*disable && i == IPV6_SHIFT)
			continue;

		if (strcmp(f, pkt_flag_names[i]) == 0)
			return 1 << i;
	}

	if (strcmp(f, "FLOW_RND") == 0) {
		*disable = !*disable;
		return F_FLOW_SEQ;
	}

	return 0;
}
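/*
 * Flag names are matched against pkt_flag_names[]; a leading '!' clears the
 * flag instead of setting it. Illustrative usage (the interface name is an
 * example):
 *
 *   echo "flag UDPSRC_RND"  > /proc/net/pktgen/eth0
 *   echo "flag !UDPSRC_RND" > /proc/net/pktgen/eth0
 *
 * "FLOW_RND" is accepted as the inverse of FLOW_SEQ, as handled above.
 */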

static ssize_t pktgen_if_write(struct file *file,
			       const char __user *user_buffer, size_t count,
			       loff_t *offset)
{
	struct seq_file *seq = file->private_data;
	struct pktgen_dev *pkt_dev = seq->private;
	size_t i, max;
	ssize_t len;
	char name[16], valstr[32];
	unsigned long value = 0;
	char *pg_result = NULL;
	char buf[128];

	pg_result = &(pkt_dev->result[0]);

	if (count < 1) {
		pr_warn("wrong command format\n");
		return -EINVAL;
	}

	max = count;
	len = count_trail_chars(user_buffer, max);
	if (len < 0) {
		pr_warn("illegal format\n");
		return len;
	}
	i = len;

	/* Read variable name */
	max = min(sizeof(name) - 1, count - i);
	len = strn_len(&user_buffer[i], max);
	if (len < 0)
		return len;

	memset(name, 0, sizeof(name));
	if (copy_from_user(name, &user_buffer[i], len))
		return -EFAULT;
	i += len;

	max = count - i;
	len = count_trail_chars(&user_buffer[i], max);
	if (len < 0)
		return len;

	i += len;

	if (debug) {
		size_t copy = min_t(size_t, count + 1, 1024);
		char *tp = strndup_user(user_buffer, copy);

		if (IS_ERR(tp))
			return PTR_ERR(tp);

		pr_debug("%s,%zu  buffer -:%s:-\n", name, count, tp);
		kfree(tp);
	}

	if (!strcmp(name, "min_pkt_size")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->min_pkt_size) {
			pkt_dev->min_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: min_pkt_size=%d",
			pkt_dev->min_pkt_size);
		return count;
	}

	if (!strcmp(name, "max_pkt_size")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->max_pkt_size) {
			pkt_dev->max_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: max_pkt_size=%d",
			pkt_dev->max_pkt_size);
		return count;
	}

	/* Shortcut for min = max */

	if (!strcmp(name, "pkt_size")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value < 14 + 20 + 8)
			value = 14 + 20 + 8;
		if (value != pkt_dev->min_pkt_size) {
			pkt_dev->min_pkt_size = value;
			pkt_dev->max_pkt_size = value;
			pkt_dev->cur_pkt_size = value;
		}
		sprintf(pg_result, "OK: pkt_size=%d", pkt_dev->min_pkt_size);
		return count;
	}

	if (!strcmp(name, "imix_weights")) {
		if (pkt_dev->clone_skb > 0)
			return -EINVAL;

		max = count - i;
		len = get_imix_entries(&user_buffer[i], max, pkt_dev);
		if (len < 0)
			return len;

		fill_imix_distribution(pkt_dev);

		return count;
	}

	if (!strcmp(name, "debug")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		debug = value;
		sprintf(pg_result, "OK: debug=%u", debug);
		return count;
	}

	if (!strcmp(name, "frags")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		pkt_dev->nfrags = value;
		sprintf(pg_result, "OK: frags=%d", pkt_dev->nfrags);
		return count;
	}
	if (!strcmp(name, "delay")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value == 0x7FFFFFFF)
			pkt_dev->delay = ULLONG_MAX;
		else
			pkt_dev->delay = (u64)value;

		sprintf(pg_result, "OK: delay=%llu",
			(unsigned long long) pkt_dev->delay);
		return count;
	}
	if (!strcmp(name, "rate")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (!value)
			return -EINVAL;
		pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		sprintf(pg_result, "OK: rate=%lu", value);
		return count;
	}
	if (!strcmp(name, "ratep")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (!value)
			return -EINVAL;
		pkt_dev->delay = NSEC_PER_SEC/value;
		if (debug)
			pr_info("Delay set at: %llu ns\n", pkt_dev->delay);

		sprintf(pg_result, "OK: ratep=%lu", value);
		return count;
	}
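	/*
	 * Worked example for the two rate commands above (the numbers are
	 * illustrative): "rate" is in Mb/s, so with min_pkt_size = 1000
	 * bytes, "rate 1000" yields delay = 1000 * 8 * 1000 / 1000 = 8000 ns
	 * between packets, i.e. 125 kpps. "ratep" is in packets per second,
	 * so "ratep 125000" yields the same delay = 10^9 / 125000 = 8000 ns.
	 */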
	if (!strcmp(name, "udp_src_min")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value != pkt_dev->udp_src_min) {
			pkt_dev->udp_src_min = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min);
		return count;
	}
	if (!strcmp(name, "udp_dst_min")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value != pkt_dev->udp_dst_min) {
			pkt_dev->udp_dst_min = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min);
		return count;
	}
	if (!strcmp(name, "udp_src_max")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value != pkt_dev->udp_src_max) {
			pkt_dev->udp_src_max = value;
			pkt_dev->cur_udp_src = value;
		}
		sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max);
		return count;
	}
	if (!strcmp(name, "udp_dst_max")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value != pkt_dev->udp_dst_max) {
			pkt_dev->udp_dst_max = value;
			pkt_dev->cur_udp_dst = value;
		}
		sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max);
		return count;
	}
	if (!strcmp(name, "clone_skb")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;
		/* clone_skb is not supported for netif_receive xmit_mode and
		 * IMIX mode.
		 */
		if ((value > 0) &&
		    ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) ||
		     !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
			return -EOPNOTSUPP;
		if (value > 0 && (pkt_dev->n_imix_entries > 0 ||
				  !(pkt_dev->flags & F_SHARED)))
			return -EINVAL;

		pkt_dev->clone_skb = value;

		sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb);
		return count;
	}
	if (!strcmp(name, "count")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		pkt_dev->count = value;
		sprintf(pg_result, "OK: count=%llu",
			(unsigned long long)pkt_dev->count);
		return count;
	}
	if (!strcmp(name, "src_mac_count")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (pkt_dev->src_mac_count != value) {
			pkt_dev->src_mac_count = value;
			pkt_dev->cur_src_mac_offset = 0;
		}
		sprintf(pg_result, "OK: src_mac_count=%d",
			pkt_dev->src_mac_count);
		return count;
	}
	if (!strcmp(name, "dst_mac_count")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (pkt_dev->dst_mac_count != value) {
			pkt_dev->dst_mac_count = value;
			pkt_dev->cur_dst_mac_offset = 0;
		}
		sprintf(pg_result, "OK: dst_mac_count=%d",
			pkt_dev->dst_mac_count);
		return count;
	}
	if (!strcmp(name, "burst")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if ((value > 1) &&
		    ((pkt_dev->xmit_mode == M_QUEUE_XMIT) ||
		     ((pkt_dev->xmit_mode == M_START_XMIT) &&
		     (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
			return -EOPNOTSUPP;

		if (value > 1 && !(pkt_dev->flags & F_SHARED))
			return -EINVAL;

		pkt_dev->burst = value < 1 ? 1 : value;
		sprintf(pg_result, "OK: burst=%u", pkt_dev->burst);
		return count;
	}
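	/*
	 * Note that "burst" and "clone_skb" both keep referencing one skb
	 * across many transmissions, which is why the checks above and in
	 * the "clone_skb" handler demand the shared-SKB flag (F_SHARED)
	 * and, for M_START_XMIT, a device advertising IFF_TX_SKB_SHARING.
	 */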
	if (!strcmp(name, "node")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (node_possible(value)) {
			pkt_dev->node = value;
			sprintf(pg_result, "OK: node=%d", pkt_dev->node);
			if (pkt_dev->page) {
				put_page(pkt_dev->page);
				pkt_dev->page = NULL;
			}
		} else {
			sprintf(pg_result, "ERROR: node not possible");
		}
		return count;
	}
	if (!strcmp(name, "xmit_mode")) {
		char f[32];

		max = min(sizeof(f) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		memset(f, 0, sizeof(f));
		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;

		if (strcmp(f, "start_xmit") == 0) {
			pkt_dev->xmit_mode = M_START_XMIT;
		} else if (strcmp(f, "netif_receive") == 0) {
			/* clone_skb set earlier, not supported in this mode */
			if (pkt_dev->clone_skb > 0)
				return -EOPNOTSUPP;

			pkt_dev->xmit_mode = M_NETIF_RECEIVE;

			/* make sure new packet is allocated every time
			 * pktgen_xmit() is called
			 */
			pkt_dev->last_ok = 1;
		} else if (strcmp(f, "queue_xmit") == 0) {
			pkt_dev->xmit_mode = M_QUEUE_XMIT;
			pkt_dev->last_ok = 1;
		} else {
			sprintf(pg_result,
				"xmit_mode -:%s:- unknown\nAvailable modes: %s",
				f, "start_xmit, netif_receive, queue_xmit\n");
			return count;
		}
		sprintf(pg_result, "OK: xmit_mode=%s", f);
		return count;
	}
	if (!strcmp(name, "flag")) {
		bool disable = false;
		__u32 flag;
		char f[32];
		char *end;

		max = min(sizeof(f) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		memset(f, 0, 32);
		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;

		flag = pktgen_read_flag(f, &disable);
		if (flag) {
			if (disable) {
				/* If "clone_skb", or "burst" parameters are
				 * configured, it means that the skb still
				 * needs to be referenced by the pktgen, so
				 * the skb must be shared.
				 */
				if (flag == F_SHARED && (pkt_dev->clone_skb ||
							 pkt_dev->burst > 1))
					return -EINVAL;
				pkt_dev->flags &= ~flag;
			} else {
				pkt_dev->flags |= flag;
			}

			sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
			return count;
		}

		/* Unknown flag */
		end = pkt_dev->result + sizeof(pkt_dev->result);
		pg_result += sprintf(pg_result,
				     "Flag -:%s:- unknown\n"
				     "Available flags, (prepend ! to un-set flag):\n", f);

		for (int n = 0; n < NR_PKT_FLAGS && pg_result < end; n++) {
			if (!IS_ENABLED(CONFIG_XFRM) && n == IPSEC_SHIFT)
				continue;
			pg_result += snprintf(pg_result, end - pg_result,
					      "%s, ", pkt_flag_names[n]);
		}
		if (!WARN_ON_ONCE(pg_result >= end)) {
			/* Remove the comma and whitespace at the end */
			*(pg_result - 2) = '\0';
		}

		return count;
	}
	if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
		max = min(sizeof(pkt_dev->dst_min) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->dst_min) != 0) {
			strscpy_pad(pkt_dev->dst_min, buf);
			pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
			pkt_dev->cur_daddr = pkt_dev->daddr_min;
		}
		if (debug)
			pr_debug("dst_min set to: %s\n", pkt_dev->dst_min);

		sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min);
		return count;
	}
	if (!strcmp(name, "dst_max")) {
		max = min(sizeof(pkt_dev->dst_max) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;
		if (strcmp(buf, pkt_dev->dst_max) != 0) {
			strscpy_pad(pkt_dev->dst_max, buf);
			pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
			pkt_dev->cur_daddr = pkt_dev->daddr_max;
		}
		if (debug)
			pr_debug("dst_max set to: %s\n", pkt_dev->dst_max);

		sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max);
		return count;
	}
	if (!strcmp(name, "dst6")) {
		max = min(sizeof(buf) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);

		pkt_dev->cur_in6_daddr = pkt_dev->in6_daddr;

		if (debug)
			pr_debug("dst6 set to: %s\n", buf);

		sprintf(pg_result, "OK: dst6=%s", buf);
		return count;
	}
	if (!strcmp(name, "dst6_min")) {
		max = min(sizeof(buf) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->min_in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);

		pkt_dev->cur_in6_daddr = pkt_dev->min_in6_daddr;
		if (debug)
			pr_debug("dst6_min set to: %s\n", buf);

		sprintf(pg_result, "OK: dst6_min=%s", buf);
		return count;
	}
	if (!strcmp(name, "dst6_max")) {
		max = min(sizeof(buf) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0)
			return len;

		pkt_dev->flags |= F_IPV6;

		if (copy_from_user(buf, &user_buffer[i], len))
			return -EFAULT;
		buf[len] = 0;

		in6_pton(buf, -1, pkt_dev->max_in6_daddr.s6_addr, -1, NULL);
		snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);

		if (debug)
			pr_debug("dst6_max set to: %s\n", buf);

		sprintf(pg_result, "OK: dst6_max=%s", buf);
		return count;
	}
"src6")) { 1519 max = min(sizeof(buf) - 1, count - i); 1520 len = strn_len(&user_buffer[i], max); 1521 if (len < 0) 1522 return len; 1523 1524 pkt_dev->flags |= F_IPV6; 1525 1526 if (copy_from_user(buf, &user_buffer[i], len)) 1527 return -EFAULT; 1528 buf[len] = 0; 1529 1530 in6_pton(buf, -1, pkt_dev->in6_saddr.s6_addr, -1, NULL); 1531 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr); 1532 1533 pkt_dev->cur_in6_saddr = pkt_dev->in6_saddr; 1534 1535 if (debug) 1536 pr_debug("src6 set to: %s\n", buf); 1537 1538 sprintf(pg_result, "OK: src6=%s", buf); 1539 return count; 1540 } 1541 if (!strcmp(name, "src_min")) { 1542 max = min(sizeof(pkt_dev->src_min) - 1, count - i); 1543 len = strn_len(&user_buffer[i], max); 1544 if (len < 0) 1545 return len; 1546 1547 if (copy_from_user(buf, &user_buffer[i], len)) 1548 return -EFAULT; 1549 buf[len] = 0; 1550 if (strcmp(buf, pkt_dev->src_min) != 0) { 1551 strscpy_pad(pkt_dev->src_min, buf); 1552 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 1553 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1554 } 1555 if (debug) 1556 pr_debug("src_min set to: %s\n", pkt_dev->src_min); 1557 1558 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); 1559 return count; 1560 } 1561 if (!strcmp(name, "src_max")) { 1562 max = min(sizeof(pkt_dev->src_max) - 1, count - i); 1563 len = strn_len(&user_buffer[i], max); 1564 if (len < 0) 1565 return len; 1566 1567 if (copy_from_user(buf, &user_buffer[i], len)) 1568 return -EFAULT; 1569 buf[len] = 0; 1570 if (strcmp(buf, pkt_dev->src_max) != 0) { 1571 strscpy_pad(pkt_dev->src_max, buf); 1572 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 1573 pkt_dev->cur_saddr = pkt_dev->saddr_max; 1574 } 1575 if (debug) 1576 pr_debug("src_max set to: %s\n", pkt_dev->src_max); 1577 1578 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); 1579 return count; 1580 } 1581 if (!strcmp(name, "dst_mac")) { 1582 max = min(sizeof(valstr) - 1, count - i); 1583 len = strn_len(&user_buffer[i], max); 1584 if (len < 0) 1585 return len; 1586 1587 memset(valstr, 0, sizeof(valstr)); 1588 if (copy_from_user(valstr, &user_buffer[i], len)) 1589 return -EFAULT; 1590 1591 if (!mac_pton(valstr, pkt_dev->dst_mac)) 1592 return -EINVAL; 1593 /* Set up Dest MAC */ 1594 ether_addr_copy(&pkt_dev->hh[0], pkt_dev->dst_mac); 1595 1596 sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac); 1597 return count; 1598 } 1599 if (!strcmp(name, "src_mac")) { 1600 max = min(sizeof(valstr) - 1, count - i); 1601 len = strn_len(&user_buffer[i], max); 1602 if (len < 0) 1603 return len; 1604 1605 memset(valstr, 0, sizeof(valstr)); 1606 if (copy_from_user(valstr, &user_buffer[i], len)) 1607 return -EFAULT; 1608 1609 if (!mac_pton(valstr, pkt_dev->src_mac)) 1610 return -EINVAL; 1611 /* Set up Src MAC */ 1612 ether_addr_copy(&pkt_dev->hh[6], pkt_dev->src_mac); 1613 1614 sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac); 1615 return count; 1616 } 1617 1618 if (!strcmp(name, "clear_counters")) { 1619 pktgen_clear_counters(pkt_dev); 1620 sprintf(pg_result, "OK: Clearing counters.\n"); 1621 return count; 1622 } 1623 1624 if (!strcmp(name, "flows")) { 1625 max = min(10, count - i); 1626 len = num_arg(&user_buffer[i], max, &value); 1627 if (len < 0) 1628 return len; 1629 1630 if (value > MAX_CFLOWS) 1631 value = MAX_CFLOWS; 1632 1633 pkt_dev->cflows = value; 1634 sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows); 1635 return count; 1636 } 1637 #ifdef CONFIG_XFRM 1638 if (!strcmp(name, "spi")) { 1639 max = min(10, count - i); 1640 len = num_arg(&user_buffer[i], max, &value); 1641 if 
		if (len < 0)
			return len;

		pkt_dev->spi = value;
		sprintf(pg_result, "OK: spi=%u", pkt_dev->spi);
		return count;
	}
#endif
	if (!strcmp(name, "flowlen")) {
		max = min(10, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		pkt_dev->lflow = value;
		sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
		return count;
	}

	if (!strcmp(name, "queue_map_min")) {
		max = min(5, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		pkt_dev->queue_map_min = value;
		sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
		return count;
	}

	if (!strcmp(name, "queue_map_max")) {
		max = min(5, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		pkt_dev->queue_map_max = value;
		sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
		return count;
	}

	if (!strcmp(name, "mpls")) {
		unsigned int n, cnt;

		max = count - i;
		len = get_labels(&user_buffer[i], max, pkt_dev);
		if (len < 0)
			return len;

		cnt = sprintf(pg_result, "OK: mpls=");
		for (n = 0; n < pkt_dev->nr_labels; n++)
			cnt += sprintf(pg_result + cnt,
				       "%08x%s", ntohl(pkt_dev->labels[n]),
				       n == pkt_dev->nr_labels-1 ? "" : ",");

		if (pkt_dev->nr_labels && pkt_dev->vlan_id != 0xffff) {
			pkt_dev->vlan_id = 0xffff; /* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN auto turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "vlan_id")) {
		max = min(4, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if (value <= 4095) {
			pkt_dev->vlan_id = value;	/* turn on VLAN */

			if (debug)
				pr_debug("VLAN turned on\n");

			if (debug && pkt_dev->nr_labels)
				pr_debug("MPLS auto turned off\n");

			pkt_dev->nr_labels = 0;		/* turn off MPLS */
			sprintf(pg_result, "OK: vlan_id=%u", pkt_dev->vlan_id);
		} else {
			pkt_dev->vlan_id = 0xffff;	/* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "vlan_p")) {
		max = min(1, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
			pkt_dev->vlan_p = value;
			sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p);
		} else {
			sprintf(pg_result, "ERROR: vlan_p must be 0-7");
		}
		return count;
	}

	if (!strcmp(name, "vlan_cfi")) {
		max = min(1, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
			pkt_dev->vlan_cfi = value;
			sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi);
		} else {
			sprintf(pg_result, "ERROR: vlan_cfi must be 0-1");
		}
		return count;
	}

	if (!strcmp(name, "svlan_id")) {
		max = min(4, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
			pkt_dev->svlan_id = value;	/* turn on SVLAN */

			if (debug)
				pr_debug("SVLAN turned on\n");
			if (debug && pkt_dev->nr_labels)
				pr_debug("MPLS auto turned off\n");

			pkt_dev->nr_labels = 0;		/* turn off MPLS */
			sprintf(pg_result, "OK: svlan_id=%u", pkt_dev->svlan_id);
		} else {
			pkt_dev->vlan_id = 0xffff;	/* turn off VLAN/SVLAN */
			pkt_dev->svlan_id = 0xffff;

			if (debug)
				pr_debug("VLAN/SVLAN turned off\n");
		}
		return count;
	}

	if (!strcmp(name, "svlan_p")) {
		max = min(1, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
			pkt_dev->svlan_p = value;
			sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p);
		} else {
			sprintf(pg_result, "ERROR: svlan_p must be 0-7");
		}
		return count;
	}

	if (!strcmp(name, "svlan_cfi")) {
		max = min(1, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
			pkt_dev->svlan_cfi = value;
			sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi);
		} else {
			sprintf(pg_result, "ERROR: svlan_cfi must be 0-1");
		}
		return count;
	}

	if (!strcmp(name, "tos")) {
		__u32 tmp_value;

		max = min(2, count - i);
		len = hex32_arg(&user_buffer[i], max, &tmp_value);
		if (len < 0)
			return len;

		if (len == 2) {
			pkt_dev->tos = tmp_value;
			sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos);
		} else {
			sprintf(pg_result, "ERROR: tos must be 00-ff");
		}
		return count;
	}

	if (!strcmp(name, "traffic_class")) {
		__u32 tmp_value;

		max = min(2, count - i);
		len = hex32_arg(&user_buffer[i], max, &tmp_value);
		if (len < 0)
			return len;

		if (len == 2) {
			pkt_dev->traffic_class = tmp_value;
			sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class);
		} else {
			sprintf(pg_result, "ERROR: traffic_class must be 00-ff");
		}
		return count;
	}

	if (!strcmp(name, "skb_priority")) {
		max = min(9, count - i);
		len = num_arg(&user_buffer[i], max, &value);
		if (len < 0)
			return len;

		pkt_dev->skb_priority = value;
		sprintf(pg_result, "OK: skb_priority=%i",
			pkt_dev->skb_priority);
		return count;
	}

	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
	return -EINVAL;
}

static int pktgen_if_open(struct inode *inode, struct file *file)
{
	return single_open(file, pktgen_if_show, pde_data(inode));
}

static const struct proc_ops pktgen_if_proc_ops = {
	.proc_open	= pktgen_if_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= pktgen_if_write,
	.proc_release	= single_release,
};

static int pktgen_thread_show(struct seq_file *seq, void *v)
{
	struct pktgen_thread *t = seq->private;
	const struct pktgen_dev *pkt_dev;

	BUG_ON(!t);

	seq_puts(seq, "Running: ");

	rcu_read_lock();
	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	seq_puts(seq, "\nStopped: ");

	list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
		if (!pkt_dev->running)
			seq_printf(seq, "%s ", pkt_dev->odevname);

	if (t->result[0])
		seq_printf(seq, "\nResult: %s\n", t->result);
	else
		seq_puts(seq, "\nResult: NA\n");

	rcu_read_unlock();

	return 0;
}

static ssize_t pktgen_thread_write(struct file *file,
				   const char __user *user_buffer,
				   size_t count, loff_t *offset)
{
	struct seq_file *seq = file->private_data;
	struct pktgen_thread *t = seq->private;
	size_t i, max;
	ssize_t len, ret;
	char name[40];
	char *pg_result;

	if (count < 1) {
		//      sprintf(pg_result, "Wrong command format");
		return -EINVAL;
	}

	max = count;
	len = count_trail_chars(user_buffer, max);
	if (len < 0)
		return len;

	i = len;

	/* Read variable name */
	max = min(sizeof(name) - 1, count - i);
	len = strn_len(&user_buffer[i], max);
	if (len < 0)
		return len;

	memset(name, 0, sizeof(name));
	if (copy_from_user(name, &user_buffer[i], len))
		return -EFAULT;
	i += len;

	max = count - i;
	len = count_trail_chars(&user_buffer[i], max);
	if (len < 0)
		return len;

	i += len;

	if (debug)
		pr_debug("t=%s, count=%lu\n", name, (unsigned long)count);

	if (!t) {
		pr_err("ERROR: No thread\n");
		ret = -EINVAL;
		goto out;
	}

	pg_result = &(t->result[0]);

	if (!strcmp(name, "add_device")) {
		char f[32];

		memset(f, 0, 32);
		max = min(sizeof(f) - 1, count - i);
		len = strn_len(&user_buffer[i], max);
		if (len < 0) {
			ret = len;
			goto out;
		}
		if (copy_from_user(f, &user_buffer[i], len))
			return -EFAULT;

		mutex_lock(&pktgen_thread_lock);
		ret = pktgen_add_device(t, f);
		mutex_unlock(&pktgen_thread_lock);
		if (!ret) {
			ret = count;
			sprintf(pg_result, "OK: add_device=%s", f);
		} else
			sprintf(pg_result, "ERROR: can not add device %s", f);
		goto out;
	}

	if (!strcmp(name, "rem_device_all")) {
		mutex_lock(&pktgen_thread_lock);
		t->control |= T_REMDEVALL;
		mutex_unlock(&pktgen_thread_lock);
		schedule_timeout_interruptible(msecs_to_jiffies(125));	/* Propagate thread->control  */
		ret = count;
		sprintf(pg_result, "OK: rem_device_all");
		goto out;
	}

	if (!strcmp(name, "max_before_softirq")) {
		sprintf(pg_result, "OK: Note! max_before_softirq is obsoleted -- Do not use");
		ret = count;
		goto out;
	}

	ret = -EINVAL;
out:
	return ret;
}

static int pktgen_thread_open(struct inode *inode, struct file *file)
{
	return single_open(file, pktgen_thread_show, pde_data(inode));
}

static const struct proc_ops pktgen_thread_proc_ops = {
	.proc_open	= pktgen_thread_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_write	= pktgen_thread_write,
	.proc_release	= single_release,
};

/* Think find or remove for NN */
static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
					      const char *ifname, int remove)
{
	struct pktgen_thread *t;
	struct pktgen_dev *pkt_dev = NULL;
	bool exact = (remove == FIND);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		pkt_dev = pktgen_find_dev(t, ifname, exact);
		if (pkt_dev) {
			if (remove) {
				pkt_dev->removal_mark = 1;
				t->control |= T_REMDEV;
			}
			break;
		}
	}
	return pkt_dev;
}

/*
 * mark a device for removal
 */
static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
{
	struct pktgen_dev *pkt_dev = NULL;
	const int max_tries = 10, msec_per_try = 125;
	int i = 0;

	mutex_lock(&pktgen_thread_lock);
	pr_debug("%s: marking %s for removal\n", __func__, ifname);

	while (1) {

		pkt_dev = __pktgen_NN_threads(pn, ifname, REMOVE);
		if (pkt_dev == NULL)
			break;	/* success */

		mutex_unlock(&pktgen_thread_lock);
		pr_debug("%s: waiting for %s to disappear....\n",
			 __func__, ifname);
		schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
		mutex_lock(&pktgen_thread_lock);

		if (++i >= max_tries) {
			pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
			       __func__, msec_per_try * i, ifname);
			break;
		}

	}

	mutex_unlock(&pktgen_thread_lock);
}

static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *dev)
{
	struct pktgen_thread *t;

	mutex_lock(&pktgen_thread_lock);

	list_for_each_entry(t, &pn->pktgen_threads, th_list) {
		struct pktgen_dev *pkt_dev;

		if_lock(t);
		list_for_each_entry(pkt_dev, &t->if_list, list) {
			if (pkt_dev->odev != dev)
				continue;

			proc_remove(pkt_dev->entry);

			pkt_dev->entry = proc_create_data(dev->name, 0600,
							  pn->proc_dir,
							  &pktgen_if_proc_ops,
							  pkt_dev);
			if (!pkt_dev->entry)
				pr_err("can't move proc entry for '%s'\n",
				       dev->name);
			break;
		}
		if_unlock(t);
	}
	mutex_unlock(&pktgen_thread_lock);
}

static int pktgen_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct pktgen_net *pn = net_generic(dev_net(dev), pg_net_id);

	if (pn->pktgen_exiting)
		return NOTIFY_DONE;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGENAME:
		pktgen_change_name(pn, dev);
		break;

	case NETDEV_UNREGISTER:
		pktgen_mark_device(pn, dev->name);
		break;
	}

	return NOTIFY_DONE;
}

static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn,
						 struct pktgen_dev *pkt_dev,
						 const char *ifname)
{
	char b[IFNAMSIZ+5];
	int i;

	for (i = 0; ifname[i] != '@'; i++) {
		if (i == IFNAMSIZ)
			break;

		b[i] = ifname[i];
	}
	b[i] = 0;

	return dev_get_by_name(pn->net, b);
}
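/*
 * The ifname passed to "add_device" may carry an "@..." suffix (for example
 * "eth0@0" and "eth0@1") so the same netdevice can be added to several
 * pktgen threads under distinct pktgen_dev names; pktgen_dev_get_by_name()
 * above strips everything from the '@' on before the netdevice lookup.
 */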
2120 */ 2121 2122 switch (event) { 2123 case NETDEV_CHANGENAME: 2124 pktgen_change_name(pn, dev); 2125 break; 2126 2127 case NETDEV_UNREGISTER: 2128 pktgen_mark_device(pn, dev->name); 2129 break; 2130 } 2131 2132 return NOTIFY_DONE; 2133 } 2134 2135 static struct net_device *pktgen_dev_get_by_name(const struct pktgen_net *pn, 2136 struct pktgen_dev *pkt_dev, 2137 const char *ifname) 2138 { 2139 char b[IFNAMSIZ+5]; 2140 int i; 2141 2142 for (i = 0; ifname[i] != '@'; i++) { 2143 if (i == IFNAMSIZ) 2144 break; 2145 2146 b[i] = ifname[i]; 2147 } 2148 b[i] = 0; 2149 2150 return dev_get_by_name(pn->net, b); 2151 } 2152 2153 2154 /* Associate pktgen_dev with a device. */ 2155 2156 static int pktgen_setup_dev(const struct pktgen_net *pn, 2157 struct pktgen_dev *pkt_dev, const char *ifname) 2158 { 2159 struct net_device *odev; 2160 int err; 2161 2162 /* Clean old setups */ 2163 if (pkt_dev->odev) { 2164 netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker); 2165 pkt_dev->odev = NULL; 2166 } 2167 2168 odev = pktgen_dev_get_by_name(pn, pkt_dev, ifname); 2169 if (!odev) { 2170 pr_err("no such netdevice: \"%s\"\n", ifname); 2171 return -ENODEV; 2172 } 2173 2174 if (odev->type != ARPHRD_ETHER && odev->type != ARPHRD_LOOPBACK) { 2175 pr_err("not an ethernet or loopback device: \"%s\"\n", ifname); 2176 err = -EINVAL; 2177 } else if (!netif_running(odev)) { 2178 pr_err("device is down: \"%s\"\n", ifname); 2179 err = -ENETDOWN; 2180 } else { 2181 pkt_dev->odev = odev; 2182 netdev_tracker_alloc(odev, &pkt_dev->dev_tracker, GFP_KERNEL); 2183 return 0; 2184 } 2185 2186 dev_put(odev); 2187 return err; 2188 } 2189 2190 /* Read pkt_dev from the interface and set up internal pktgen_dev 2191 * structure to have the right information to create/send packets 2192 */ 2193 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) 2194 { 2195 int ntxq; 2196 2197 if (!pkt_dev->odev) { 2198 pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n"); 2199 sprintf(pkt_dev->result, 2200 "ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 2201 return; 2202 } 2203 2204 /* make sure that we don't pick a non-existing transmit queue */ 2205 ntxq = pkt_dev->odev->real_num_tx_queues; 2206 2207 if (ntxq <= pkt_dev->queue_map_min) { 2208 pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", 2209 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq, 2210 pkt_dev->odevname); 2211 pkt_dev->queue_map_min = (ntxq ?: 1) - 1; 2212 } 2213 if (pkt_dev->queue_map_max >= ntxq) { 2214 pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n", 2215 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq, 2216 pkt_dev->odevname); 2217 pkt_dev->queue_map_max = (ntxq ?: 1) - 1; 2218 } 2219 2220 /* Default to the interface's mac if not explicitly set. 
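	 *
	 * Layout of the cached header pkt_dev->hh[]: bytes 0-5 hold the
	 * destination MAC and bytes 6-11 the source MAC; fill_packet_ipv4()
	 * and fill_packet_ipv6() later copy hh[0..11] verbatim into the
	 * frame and append the ethertype.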
*/
2221
2222 	if (is_zero_ether_addr(pkt_dev->src_mac))
2223 		ether_addr_copy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr);
2224
2225 	/* Set up Dest MAC */
2226 	ether_addr_copy(&(pkt_dev->hh[0]), pkt_dev->dst_mac);
2227
2228 	if (pkt_dev->flags & F_IPV6) {
2229 		int i, set = 0, err = 1;
2230 		struct inet6_dev *idev;
2231
2232 		if (pkt_dev->min_pkt_size == 0) {
2233 			pkt_dev->min_pkt_size = 14 + sizeof(struct ipv6hdr)
2234 						+ sizeof(struct udphdr)
2235 						+ sizeof(struct pktgen_hdr)
2236 						+ pkt_dev->pkt_overhead;
2237 		}
2238
2239 		for (i = 0; i < sizeof(struct in6_addr); i++)
2240 			if (pkt_dev->cur_in6_saddr.s6_addr[i]) {
2241 				set = 1;
2242 				break;
2243 			}
2244
2245 		if (!set) {
2246
2247 			/*
2248 			 * Use the link-level address if unconfigured.
2249 			 *
2250 			 * use ipv6_get_lladdr() if/when it gets exported
2251 			 */
2252
2253 			rcu_read_lock();
2254 			idev = __in6_dev_get(pkt_dev->odev);
2255 			if (idev) {
2256 				struct inet6_ifaddr *ifp;
2257
2258 				read_lock_bh(&idev->lock);
2259 				list_for_each_entry(ifp, &idev->addr_list, if_list) {
2260 					if ((ifp->scope & IFA_LINK) &&
2261 					    !(ifp->flags & IFA_F_TENTATIVE)) {
2262 						pkt_dev->cur_in6_saddr = ifp->addr;
2263 						err = 0;
2264 						break;
2265 					}
2266 				}
2267 				read_unlock_bh(&idev->lock);
2268 			}
2269 			rcu_read_unlock();
2270 			if (err)
2271 				pr_err("ERROR: IPv6 link address not available\n");
2272 		}
2273 	} else {
2274 		if (pkt_dev->min_pkt_size == 0) {
2275 			pkt_dev->min_pkt_size = 14 + sizeof(struct iphdr)
2276 						+ sizeof(struct udphdr)
2277 						+ sizeof(struct pktgen_hdr)
2278 						+ pkt_dev->pkt_overhead;
2279 		}
2280
2281 		pkt_dev->saddr_min = 0;
2282 		pkt_dev->saddr_max = 0;
2283 		if (strlen(pkt_dev->src_min) == 0) {
2284
2285 			struct in_device *in_dev;
2286
2287 			rcu_read_lock();
2288 			in_dev = __in_dev_get_rcu(pkt_dev->odev);
2289 			if (in_dev) {
2290 				const struct in_ifaddr *ifa;
2291
2292 				ifa = rcu_dereference(in_dev->ifa_list);
2293 				if (ifa) {
2294 					pkt_dev->saddr_min = ifa->ifa_address;
2295 					pkt_dev->saddr_max = pkt_dev->saddr_min;
2296 				}
2297 			}
2298 			rcu_read_unlock();
2299 		} else {
2300 			pkt_dev->saddr_min = in_aton(pkt_dev->src_min);
2301 			pkt_dev->saddr_max = in_aton(pkt_dev->src_max);
2302 		}
2303
2304 		pkt_dev->daddr_min = in_aton(pkt_dev->dst_min);
2305 		pkt_dev->daddr_max = in_aton(pkt_dev->dst_max);
2306 	}
2307 	/* Initialize current values.
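	 * The cur_* fields below are the per-packet iterators that
	 * mod_cur_headers() subsequently walks or randomizes on each
	 * transmit.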
*/
2308 	pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size;
2309 	if (pkt_dev->min_pkt_size > pkt_dev->max_pkt_size)
2310 		pkt_dev->max_pkt_size = pkt_dev->min_pkt_size;
2311
2312 	pkt_dev->cur_dst_mac_offset = 0;
2313 	pkt_dev->cur_src_mac_offset = 0;
2314 	pkt_dev->cur_saddr = pkt_dev->saddr_min;
2315 	pkt_dev->cur_daddr = pkt_dev->daddr_min;
2316 	pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min;
2317 	pkt_dev->cur_udp_src = pkt_dev->udp_src_min;
2318 	pkt_dev->nflows = 0;
2319 }
2320
2321
2322 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2323 {
2324 	ktime_t start_time, end_time;
2325 	s64 remaining;
2326 	struct hrtimer_sleeper t;
2327
2328 	hrtimer_setup_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2329 	hrtimer_set_expires(&t.timer, spin_until);
2330
2331 	remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
2332 	if (remaining <= 0)
2333 		goto out;
2334
2335 	start_time = ktime_get();
2336 	if (remaining < 100000) {
2337 		/* for small delays (<100us), just loop until limit is reached */
2338 		do {
2339 			end_time = ktime_get();
2340 		} while (ktime_compare(end_time, spin_until) < 0);
2341 	} else {
2342 		do {
2343 			set_current_state(TASK_INTERRUPTIBLE);
2344 			hrtimer_sleeper_start_expires(&t, HRTIMER_MODE_ABS);
2345
2346 			if (likely(t.task))
2347 				schedule();
2348
2349 			hrtimer_cancel(&t.timer);
2350 		} while (t.task && pkt_dev->running && !signal_pending(current));
2351 		__set_current_state(TASK_RUNNING);
2352 		end_time = ktime_get();
2353 	}
2354
2355 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
2356 out:
2357 	pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
2358 	destroy_hrtimer_on_stack(&t.timer);
2359 }
2360
2361 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
2362 {
2363 	pkt_dev->pkt_overhead = 0;
2364 	pkt_dev->pkt_overhead += pkt_dev->nr_labels * sizeof(u32);
2365 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
2366 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
2367 }
2368
2369 static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
2370 {
2371 	return !!(pkt_dev->flows[flow].flags & F_INIT);
2372 }
2373
2374 static inline int f_pick(struct pktgen_dev *pkt_dev)
2375 {
2376 	int flow = pkt_dev->curfl;
2377
2378 	if (pkt_dev->flags & F_FLOW_SEQ) {
2379 		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
2380 			/* reset time */
2381 			pkt_dev->flows[flow].count = 0;
2382 			pkt_dev->flows[flow].flags = 0;
2383 			pkt_dev->curfl += 1;
2384 			if (pkt_dev->curfl >= pkt_dev->cflows)
2385 				pkt_dev->curfl = 0; /* reset */
2386 		}
2387 	} else {
2388 		flow = get_random_u32_below(pkt_dev->cflows);
2389 		pkt_dev->curfl = flow;
2390
2391 		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
2392 			pkt_dev->flows[flow].count = 0;
2393 			pkt_dev->flows[flow].flags = 0;
2394 		}
2395 	}
2396
2397 	return pkt_dev->curfl;
2398 }
2399
2400
2401 /* If there was already an IPsec SA, we keep it as is; otherwise
2402  * we go look for one ...
2403  */
2404 #define DUMMY_MARK 0
2405 static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
2406 {
2407 #ifdef CONFIG_XFRM
2408 	struct xfrm_state *x = pkt_dev->flows[flow].x;
2409 	struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id);
2410
2411 	if (!x) {
2412
2413 		if (pkt_dev->spi) {
2414 			/* We need to find the right SA as quickly as possible,
2415 			 * so search with the minimum criteria that achieves this.
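			 * (xfrm_state_lookup_byspi() keys on the SPI and
			 * family alone; the xfrm_stateonly_find() fallback
			 * below matches on addresses, family, mode and
			 * proto instead.)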
2416 */ 2417 x = xfrm_state_lookup_byspi(pn->net, htonl(pkt_dev->spi), AF_INET); 2418 } else { 2419 /* slow path: we don't already have xfrm_state */ 2420 x = xfrm_stateonly_find(pn->net, DUMMY_MARK, 0, 2421 (xfrm_address_t *)&pkt_dev->cur_daddr, 2422 (xfrm_address_t *)&pkt_dev->cur_saddr, 2423 AF_INET, 2424 pkt_dev->ipsmode, 2425 pkt_dev->ipsproto, 0); 2426 } 2427 if (x) { 2428 pkt_dev->flows[flow].x = x; 2429 set_pkt_overhead(pkt_dev); 2430 pkt_dev->pkt_overhead += x->props.header_len; 2431 } 2432 2433 } 2434 #endif 2435 } 2436 static void set_cur_queue_map(struct pktgen_dev *pkt_dev) 2437 { 2438 if (pkt_dev->flags & F_QUEUE_MAP_CPU) 2439 pkt_dev->cur_queue_map = smp_processor_id(); 2440 2441 else if (pkt_dev->queue_map_min <= pkt_dev->queue_map_max) { 2442 __u16 t; 2443 2444 if (pkt_dev->flags & F_QUEUE_MAP_RND) { 2445 t = get_random_u32_inclusive(pkt_dev->queue_map_min, 2446 pkt_dev->queue_map_max); 2447 } else { 2448 t = pkt_dev->cur_queue_map + 1; 2449 if (t > pkt_dev->queue_map_max) 2450 t = pkt_dev->queue_map_min; 2451 } 2452 pkt_dev->cur_queue_map = t; 2453 } 2454 pkt_dev->cur_queue_map = pkt_dev->cur_queue_map % pkt_dev->odev->real_num_tx_queues; 2455 } 2456 2457 /* Increment/randomize headers according to flags and current values 2458 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 2459 */ 2460 static void mod_cur_headers(struct pktgen_dev *pkt_dev) 2461 { 2462 __u32 imn; 2463 __u32 imx; 2464 int flow = 0; 2465 2466 if (pkt_dev->cflows) 2467 flow = f_pick(pkt_dev); 2468 2469 /* Deal with source MAC */ 2470 if (pkt_dev->src_mac_count > 1) { 2471 __u32 mc; 2472 __u32 tmp; 2473 2474 if (pkt_dev->flags & F_MACSRC_RND) 2475 mc = get_random_u32_below(pkt_dev->src_mac_count); 2476 else { 2477 mc = pkt_dev->cur_src_mac_offset++; 2478 if (pkt_dev->cur_src_mac_offset >= 2479 pkt_dev->src_mac_count) 2480 pkt_dev->cur_src_mac_offset = 0; 2481 } 2482 2483 tmp = pkt_dev->src_mac[5] + (mc & 0xFF); 2484 pkt_dev->hh[11] = tmp; 2485 tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2486 pkt_dev->hh[10] = tmp; 2487 tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2488 pkt_dev->hh[9] = tmp; 2489 tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2490 pkt_dev->hh[8] = tmp; 2491 tmp = (pkt_dev->src_mac[1] + (tmp >> 8)); 2492 pkt_dev->hh[7] = tmp; 2493 } 2494 2495 /* Deal with Destination MAC */ 2496 if (pkt_dev->dst_mac_count > 1) { 2497 __u32 mc; 2498 __u32 tmp; 2499 2500 if (pkt_dev->flags & F_MACDST_RND) 2501 mc = get_random_u32_below(pkt_dev->dst_mac_count); 2502 2503 else { 2504 mc = pkt_dev->cur_dst_mac_offset++; 2505 if (pkt_dev->cur_dst_mac_offset >= 2506 pkt_dev->dst_mac_count) { 2507 pkt_dev->cur_dst_mac_offset = 0; 2508 } 2509 } 2510 2511 tmp = pkt_dev->dst_mac[5] + (mc & 0xFF); 2512 pkt_dev->hh[5] = tmp; 2513 tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 2514 pkt_dev->hh[4] = tmp; 2515 tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 2516 pkt_dev->hh[3] = tmp; 2517 tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 2518 pkt_dev->hh[2] = tmp; 2519 tmp = (pkt_dev->dst_mac[1] + (tmp >> 8)); 2520 pkt_dev->hh[1] = tmp; 2521 } 2522 2523 if (pkt_dev->flags & F_MPLS_RND) { 2524 unsigned int i; 2525 2526 for (i = 0; i < pkt_dev->nr_labels; i++) 2527 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2528 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2529 ((__force __be32)get_random_u32() & 2530 htonl(0x000fffff)); 2531 } 2532 2533 if ((pkt_dev->flags & F_VID_RND) && (pkt_dev->vlan_id != 0xffff)) { 2534 pkt_dev->vlan_id 
= get_random_u32_below(4096); 2535 } 2536 2537 if ((pkt_dev->flags & F_SVID_RND) && (pkt_dev->svlan_id != 0xffff)) { 2538 pkt_dev->svlan_id = get_random_u32_below(4096); 2539 } 2540 2541 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 2542 if (pkt_dev->flags & F_UDPSRC_RND) 2543 pkt_dev->cur_udp_src = get_random_u32_inclusive(pkt_dev->udp_src_min, 2544 pkt_dev->udp_src_max - 1); 2545 2546 else { 2547 pkt_dev->cur_udp_src++; 2548 if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max) 2549 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 2550 } 2551 } 2552 2553 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) { 2554 if (pkt_dev->flags & F_UDPDST_RND) { 2555 pkt_dev->cur_udp_dst = get_random_u32_inclusive(pkt_dev->udp_dst_min, 2556 pkt_dev->udp_dst_max - 1); 2557 } else { 2558 pkt_dev->cur_udp_dst++; 2559 if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max) 2560 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min; 2561 } 2562 } 2563 2564 if (!(pkt_dev->flags & F_IPV6)) { 2565 2566 imn = ntohl(pkt_dev->saddr_min); 2567 imx = ntohl(pkt_dev->saddr_max); 2568 if (imn < imx) { 2569 __u32 t; 2570 2571 if (pkt_dev->flags & F_IPSRC_RND) 2572 t = get_random_u32_inclusive(imn, imx - 1); 2573 else { 2574 t = ntohl(pkt_dev->cur_saddr); 2575 t++; 2576 if (t > imx) 2577 t = imn; 2578 2579 } 2580 pkt_dev->cur_saddr = htonl(t); 2581 } 2582 2583 if (pkt_dev->cflows && f_seen(pkt_dev, flow)) { 2584 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 2585 } else { 2586 imn = ntohl(pkt_dev->daddr_min); 2587 imx = ntohl(pkt_dev->daddr_max); 2588 if (imn < imx) { 2589 __u32 t; 2590 __be32 s; 2591 2592 if (pkt_dev->flags & F_IPDST_RND) { 2593 2594 do { 2595 t = get_random_u32_inclusive(imn, imx - 1); 2596 s = htonl(t); 2597 } while (ipv4_is_loopback(s) || 2598 ipv4_is_multicast(s) || 2599 ipv4_is_lbcast(s) || 2600 ipv4_is_zeronet(s) || 2601 ipv4_is_local_multicast(s)); 2602 pkt_dev->cur_daddr = s; 2603 } else { 2604 t = ntohl(pkt_dev->cur_daddr); 2605 t++; 2606 if (t > imx) { 2607 t = imn; 2608 } 2609 pkt_dev->cur_daddr = htonl(t); 2610 } 2611 } 2612 if (pkt_dev->cflows) { 2613 pkt_dev->flows[flow].flags |= F_INIT; 2614 pkt_dev->flows[flow].cur_daddr = 2615 pkt_dev->cur_daddr; 2616 if (pkt_dev->flags & F_IPSEC) 2617 get_ipsec_sa(pkt_dev, flow); 2618 pkt_dev->nflows++; 2619 } 2620 } 2621 } else { /* IPV6 * */ 2622 2623 if (!ipv6_addr_any(&pkt_dev->min_in6_daddr)) { 2624 int i; 2625 2626 /* Only random destinations yet */ 2627 2628 for (i = 0; i < 4; i++) { 2629 pkt_dev->cur_in6_daddr.s6_addr32[i] = 2630 (((__force __be32)get_random_u32() | 2631 pkt_dev->min_in6_daddr.s6_addr32[i]) & 2632 pkt_dev->max_in6_daddr.s6_addr32[i]); 2633 } 2634 } 2635 } 2636 2637 if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) { 2638 __u32 t; 2639 2640 if (pkt_dev->flags & F_TXSIZE_RND) { 2641 t = get_random_u32_inclusive(pkt_dev->min_pkt_size, 2642 pkt_dev->max_pkt_size - 1); 2643 } else { 2644 t = pkt_dev->cur_pkt_size + 1; 2645 if (t > pkt_dev->max_pkt_size) 2646 t = pkt_dev->min_pkt_size; 2647 } 2648 pkt_dev->cur_pkt_size = t; 2649 } else if (pkt_dev->n_imix_entries > 0) { 2650 struct imix_pkt *entry; 2651 __u32 t = get_random_u32_below(IMIX_PRECISION); 2652 __u8 entry_index = pkt_dev->imix_distribution[t]; 2653 2654 entry = &pkt_dev->imix_entries[entry_index]; 2655 entry->count_so_far++; 2656 pkt_dev->cur_pkt_size = entry->size; 2657 } 2658 2659 set_cur_queue_map(pkt_dev); 2660 2661 pkt_dev->flows[flow].count++; 2662 } 2663 2664 static void fill_imix_distribution(struct pktgen_dev *pkt_dev) 2665 { 2666 int cumulative_probabilites[MAX_IMIX_ENTRIES]; 
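	/* Worked example (assuming IMIX_PRECISION == 100, as the
	 * hard-coded 100 below implies): two entries with weights 3 and 1
	 * give total_weight = 4, so the cumulative thresholds become
	 * {75, 100}; imix_distribution[] then maps slots 0-74 to entry 0
	 * and slots 75-99 to entry 1, and mod_cur_headers() draws a slot
	 * uniformly at random to pick the packet size.
	 */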
2667 	int j = 0;
2668 	__u64 cumulative_prob = 0;
2669 	__u64 total_weight = 0;
2670 	int i = 0;
2671
2672 	for (i = 0; i < pkt_dev->n_imix_entries; i++)
2673 		total_weight += pkt_dev->imix_entries[i].weight;
2674
2675 	/* Fill cumulative_probabilites with sum of normalized probabilities */
2676 	for (i = 0; i < pkt_dev->n_imix_entries - 1; i++) {
2677 		cumulative_prob += div64_u64(pkt_dev->imix_entries[i].weight *
2678 						IMIX_PRECISION,
2679 					     total_weight);
2680 		cumulative_probabilites[i] = cumulative_prob;
2681 	}
2682 	cumulative_probabilites[pkt_dev->n_imix_entries - 1] = 100;
2683
2684 	for (i = 0; i < IMIX_PRECISION; i++) {
2685 		if (i == cumulative_probabilites[j])
2686 			j++;
2687 		pkt_dev->imix_distribution[i] = j;
2688 	}
2689 }
2690
2691 #ifdef CONFIG_XFRM
2692 static u32 pktgen_dst_metrics[RTAX_MAX + 1] = {
2693
2694 	[RTAX_HOPLIMIT] = 0x5, /* Set a static hoplimit */
2695 };
2696
2697 static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2698 {
2699 	struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2700 	int err = 0;
2701 	struct net *net = dev_net(pkt_dev->odev);
2702
2703 	if (!x)
2704 		return 0;
2705 	/* XXX: we don't support tunnel mode for now until
2706 	 * we resolve the dst issue
2707 	 */
2708 	if ((x->props.mode != XFRM_MODE_TRANSPORT) && (pkt_dev->spi == 0))
2709 		return 0;
2710
2711 	/* But when the user specifies a valid SPI, the transformation
2712 	 * supports both transport/tunnel modes and ESP/AH types.
2713 	 */
2714 	if ((x->props.mode == XFRM_MODE_TUNNEL) && (pkt_dev->spi != 0))
2715 		skb->_skb_refdst = (unsigned long)&pkt_dev->xdst.u.dst | SKB_DST_NOREF;
2716
2717 	rcu_read_lock_bh();
2718 	err = pktgen_xfrm_outer_mode_output(x, skb);
2719 	rcu_read_unlock_bh();
2720 	if (err) {
2721 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
2722 		goto error;
2723 	}
2724 	err = x->type->output(x, skb);
2725 	if (err) {
2726 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
2727 		goto error;
2728 	}
2729 	spin_lock_bh(&x->lock);
2730 	x->curlft.bytes += skb->len;
2731 	x->curlft.packets++;
2732 	spin_unlock_bh(&x->lock);
2733 error:
2734 	return err;
2735 }
2736
2737 static void free_SAs(struct pktgen_dev *pkt_dev)
2738 {
2739 	if (pkt_dev->cflows) {
2740 		/* let go of the SAs if we have them */
2741 		int i;
2742
2743 		for (i = 0; i < pkt_dev->cflows; i++) {
2744 			struct xfrm_state *x = pkt_dev->flows[i].x;
2745
2746 			if (x) {
2747 				xfrm_state_put(x);
2748 				pkt_dev->flows[i].x = NULL;
2749 			}
2750 		}
2751 	}
2752 }
2753
2754 static int process_ipsec(struct pktgen_dev *pkt_dev,
2755 			 struct sk_buff *skb, __be16 protocol)
2756 {
2757 	if (pkt_dev->flags & F_IPSEC) {
2758 		struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2759 		int nhead = 0;
2760
2761 		if (x) {
2762 			struct ethhdr *eth;
2763 			struct iphdr *iph;
2764 			int ret;
2765
2766 			nhead = x->props.header_len - skb_headroom(skb);
2767 			if (nhead > 0) {
2768 				ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2769 				if (ret < 0) {
2770 					pr_err("Error expanding ipsec packet %d\n",
2771 					       ret);
2772 					goto err;
2773 				}
2774 			}
2775
2776 			/* ipsec does not expect the ll header */
2777 			skb_pull(skb, ETH_HLEN);
2778 			ret = pktgen_output_ipsec(skb, pkt_dev);
2779 			if (ret) {
2780 				pr_err("Error creating ipsec packet %d\n", ret);
2781 				goto err;
2782 			}
2783 			/* restore the ll header */
2784 			eth = skb_push(skb, ETH_HLEN);
2785 			memcpy(eth, pkt_dev->hh, 2 * ETH_ALEN);
2786 			eth->h_proto = protocol;
2787
2788 			/* Update IPv4 header len as well as checksum value */
2789 			iph = ip_hdr(skb);
2790 			iph->tot_len = htons(skb->len - ETH_HLEN);
2791 			ip_send_check(iph);
2792 		}
2793 	}
2794 	return 1;
2795
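	/* The error path below frees the skb, so a zero return means the
	 * packet was consumed and the caller must not touch it again (see
	 * fill_packet_ipv4(), which returns NULL in that case).
	 */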
err: 2796 kfree_skb(skb); 2797 return 0; 2798 } 2799 #endif 2800 2801 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2802 { 2803 unsigned int i; 2804 2805 for (i = 0; i < pkt_dev->nr_labels; i++) 2806 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2807 2808 mpls--; 2809 *mpls |= MPLS_STACK_BOTTOM; 2810 } 2811 2812 static inline __be16 build_tci(unsigned int id, unsigned int cfi, 2813 unsigned int prio) 2814 { 2815 return htons(id | (cfi << 12) | (prio << 13)); 2816 } 2817 2818 static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, 2819 int datalen) 2820 { 2821 struct timespec64 timestamp; 2822 struct pktgen_hdr *pgh; 2823 2824 pgh = skb_put(skb, sizeof(*pgh)); 2825 datalen -= sizeof(*pgh); 2826 2827 if (pkt_dev->nfrags <= 0) { 2828 skb_put_zero(skb, datalen); 2829 } else { 2830 int frags = pkt_dev->nfrags; 2831 int i, len; 2832 int frag_len; 2833 2834 2835 if (frags > MAX_SKB_FRAGS) 2836 frags = MAX_SKB_FRAGS; 2837 len = datalen - frags * PAGE_SIZE; 2838 if (len > 0) { 2839 skb_put_zero(skb, len); 2840 datalen = frags * PAGE_SIZE; 2841 } 2842 2843 i = 0; 2844 frag_len = (datalen/frags) < PAGE_SIZE ? 2845 (datalen/frags) : PAGE_SIZE; 2846 while (datalen > 0) { 2847 if (unlikely(!pkt_dev->page)) { 2848 int node = numa_node_id(); 2849 2850 if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE)) 2851 node = pkt_dev->node; 2852 pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); 2853 if (!pkt_dev->page) 2854 break; 2855 } 2856 get_page(pkt_dev->page); 2857 2858 /*last fragment, fill rest of data*/ 2859 if (i == (frags - 1)) 2860 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i], 2861 pkt_dev->page, 0, 2862 (datalen < PAGE_SIZE ? 2863 datalen : PAGE_SIZE)); 2864 else 2865 skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i], 2866 pkt_dev->page, 0, frag_len); 2867 2868 datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]); 2869 skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 2870 skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 2871 i++; 2872 skb_shinfo(skb)->nr_frags = i; 2873 } 2874 } 2875 2876 /* Stamp the time, and sequence number, 2877 * convert them to network byte order 2878 */ 2879 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2880 pgh->seq_num = htonl(pkt_dev->seq_num); 2881 2882 if (pkt_dev->flags & F_NO_TIMESTAMP) { 2883 pgh->tv_sec = 0; 2884 pgh->tv_usec = 0; 2885 } else { 2886 /* 2887 * pgh->tv_sec wraps in y2106 when interpreted as unsigned 2888 * as done by wireshark, or y2038 when interpreted as signed. 2889 * This is probably harmless, but if anyone wants to improve 2890 * it, we could introduce a variant that puts 64-bit nanoseconds 2891 * into the respective header bytes. 2892 * This would also be slightly faster to read. 2893 */ 2894 ktime_get_real_ts64(×tamp); 2895 pgh->tv_sec = htonl(timestamp.tv_sec); 2896 pgh->tv_usec = htonl(timestamp.tv_nsec / NSEC_PER_USEC); 2897 } 2898 } 2899 2900 static struct sk_buff *pktgen_alloc_skb(struct net_device *dev, 2901 struct pktgen_dev *pkt_dev) 2902 { 2903 unsigned int extralen = LL_RESERVED_SPACE(dev); 2904 struct sk_buff *skb = NULL; 2905 unsigned int size; 2906 2907 size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead; 2908 if (pkt_dev->flags & F_NODE) { 2909 int node = pkt_dev->node >= 0 ? 
pkt_dev->node : numa_node_id(); 2910 2911 skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node); 2912 if (likely(skb)) { 2913 skb_reserve(skb, NET_SKB_PAD); 2914 skb->dev = dev; 2915 } 2916 } else { 2917 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT); 2918 } 2919 2920 /* the caller pre-fetches from skb->data and reserves for the mac hdr */ 2921 if (likely(skb)) 2922 skb_reserve(skb, extralen - 16); 2923 2924 return skb; 2925 } 2926 2927 static struct sk_buff *fill_packet_ipv4(struct net_device *odev, 2928 struct pktgen_dev *pkt_dev) 2929 { 2930 struct sk_buff *skb = NULL; 2931 __u8 *eth; 2932 struct udphdr *udph; 2933 int datalen, iplen; 2934 struct iphdr *iph; 2935 __be16 protocol = htons(ETH_P_IP); 2936 __be32 *mpls; 2937 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 2938 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 2939 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 2940 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 2941 u16 queue_map; 2942 2943 if (pkt_dev->nr_labels) 2944 protocol = htons(ETH_P_MPLS_UC); 2945 2946 if (pkt_dev->vlan_id != 0xffff) 2947 protocol = htons(ETH_P_8021Q); 2948 2949 /* Update any of the values, used when we're incrementing various 2950 * fields. 2951 */ 2952 mod_cur_headers(pkt_dev); 2953 queue_map = pkt_dev->cur_queue_map; 2954 2955 skb = pktgen_alloc_skb(odev, pkt_dev); 2956 if (!skb) { 2957 sprintf(pkt_dev->result, "No memory"); 2958 return NULL; 2959 } 2960 2961 prefetchw(skb->data); 2962 skb_reserve(skb, 16); 2963 2964 /* Reserve for ethernet and IP header */ 2965 eth = skb_push(skb, 14); 2966 mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32)); 2967 if (pkt_dev->nr_labels) 2968 mpls_push(mpls, pkt_dev); 2969 2970 if (pkt_dev->vlan_id != 0xffff) { 2971 if (pkt_dev->svlan_id != 0xffff) { 2972 svlan_tci = skb_put(skb, sizeof(__be16)); 2973 *svlan_tci = build_tci(pkt_dev->svlan_id, 2974 pkt_dev->svlan_cfi, 2975 pkt_dev->svlan_p); 2976 svlan_encapsulated_proto = skb_put(skb, 2977 sizeof(__be16)); 2978 *svlan_encapsulated_proto = htons(ETH_P_8021Q); 2979 } 2980 vlan_tci = skb_put(skb, sizeof(__be16)); 2981 *vlan_tci = build_tci(pkt_dev->vlan_id, 2982 pkt_dev->vlan_cfi, 2983 pkt_dev->vlan_p); 2984 vlan_encapsulated_proto = skb_put(skb, sizeof(__be16)); 2985 *vlan_encapsulated_proto = htons(ETH_P_IP); 2986 } 2987 2988 skb_reset_mac_header(skb); 2989 skb_set_network_header(skb, skb->len); 2990 iph = skb_put(skb, sizeof(struct iphdr)); 2991 2992 skb_set_transport_header(skb, skb->len); 2993 udph = skb_put(skb, sizeof(struct udphdr)); 2994 skb_set_queue_mapping(skb, queue_map); 2995 skb->priority = pkt_dev->skb_priority; 2996 2997 memcpy(eth, pkt_dev->hh, 12); 2998 *(__be16 *)ð[12] = protocol; 2999 3000 /* Eth + IPh + UDPh + mpls */ 3001 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 3002 pkt_dev->pkt_overhead; 3003 if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) 3004 datalen = sizeof(struct pktgen_hdr); 3005 3006 udph->source = htons(pkt_dev->cur_udp_src); 3007 udph->dest = htons(pkt_dev->cur_udp_dst); 3008 udph->len = htons(datalen + 8); /* DATA + udphdr */ 3009 udph->check = 0; 3010 3011 iph->ihl = 5; 3012 iph->version = 4; 3013 iph->ttl = 32; 3014 iph->tos = pkt_dev->tos; 3015 iph->protocol = IPPROTO_UDP; /* UDP */ 3016 iph->saddr = pkt_dev->cur_saddr; 3017 iph->daddr = pkt_dev->cur_daddr; 3018 iph->id = htons(pkt_dev->ip_id); 3019 pkt_dev->ip_id++; 3020 iph->frag_off = 0; 3021 iplen = 20 + 8 + datalen; 3022 
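	/* Sizing example: with cur_pkt_size = 300 and no MPLS/VLAN/IPsec
	 * overhead, datalen = 300 - 14 - 20 - 8 = 258, so iplen = 286 and
	 * the frame is again 14 + 286 = 300 bytes on the wire.
	 */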
iph->tot_len = htons(iplen); 3023 ip_send_check(iph); 3024 skb->protocol = protocol; 3025 skb->dev = odev; 3026 skb->pkt_type = PACKET_HOST; 3027 3028 pktgen_finalize_skb(pkt_dev, skb, datalen); 3029 3030 if (!(pkt_dev->flags & F_UDPCSUM)) { 3031 skb->ip_summed = CHECKSUM_NONE; 3032 } else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM)) { 3033 skb->ip_summed = CHECKSUM_PARTIAL; 3034 skb->csum = 0; 3035 udp4_hwcsum(skb, iph->saddr, iph->daddr); 3036 } else { 3037 __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0); 3038 3039 /* add protocol-dependent pseudo-header */ 3040 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, 3041 datalen + 8, IPPROTO_UDP, csum); 3042 3043 if (udph->check == 0) 3044 udph->check = CSUM_MANGLED_0; 3045 } 3046 3047 #ifdef CONFIG_XFRM 3048 if (!process_ipsec(pkt_dev, skb, protocol)) 3049 return NULL; 3050 #endif 3051 3052 return skb; 3053 } 3054 3055 static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 3056 struct pktgen_dev *pkt_dev) 3057 { 3058 struct sk_buff *skb = NULL; 3059 __u8 *eth; 3060 struct udphdr *udph; 3061 int datalen, udplen; 3062 struct ipv6hdr *iph; 3063 __be16 protocol = htons(ETH_P_IPV6); 3064 __be32 *mpls; 3065 __be16 *vlan_tci = NULL; /* Encapsulates priority and VLAN ID */ 3066 __be16 *vlan_encapsulated_proto = NULL; /* packet type ID field (or len) for VLAN tag */ 3067 __be16 *svlan_tci = NULL; /* Encapsulates priority and SVLAN ID */ 3068 __be16 *svlan_encapsulated_proto = NULL; /* packet type ID field (or len) for SVLAN tag */ 3069 u16 queue_map; 3070 3071 if (pkt_dev->nr_labels) 3072 protocol = htons(ETH_P_MPLS_UC); 3073 3074 if (pkt_dev->vlan_id != 0xffff) 3075 protocol = htons(ETH_P_8021Q); 3076 3077 /* Update any of the values, used when we're incrementing various 3078 * fields. 
3079 */ 3080 mod_cur_headers(pkt_dev); 3081 queue_map = pkt_dev->cur_queue_map; 3082 3083 skb = pktgen_alloc_skb(odev, pkt_dev); 3084 if (!skb) { 3085 sprintf(pkt_dev->result, "No memory"); 3086 return NULL; 3087 } 3088 3089 prefetchw(skb->data); 3090 skb_reserve(skb, 16); 3091 3092 /* Reserve for ethernet and IP header */ 3093 eth = skb_push(skb, 14); 3094 mpls = skb_put(skb, pkt_dev->nr_labels * sizeof(__u32)); 3095 if (pkt_dev->nr_labels) 3096 mpls_push(mpls, pkt_dev); 3097 3098 if (pkt_dev->vlan_id != 0xffff) { 3099 if (pkt_dev->svlan_id != 0xffff) { 3100 svlan_tci = skb_put(skb, sizeof(__be16)); 3101 *svlan_tci = build_tci(pkt_dev->svlan_id, 3102 pkt_dev->svlan_cfi, 3103 pkt_dev->svlan_p); 3104 svlan_encapsulated_proto = skb_put(skb, 3105 sizeof(__be16)); 3106 *svlan_encapsulated_proto = htons(ETH_P_8021Q); 3107 } 3108 vlan_tci = skb_put(skb, sizeof(__be16)); 3109 *vlan_tci = build_tci(pkt_dev->vlan_id, 3110 pkt_dev->vlan_cfi, 3111 pkt_dev->vlan_p); 3112 vlan_encapsulated_proto = skb_put(skb, sizeof(__be16)); 3113 *vlan_encapsulated_proto = htons(ETH_P_IPV6); 3114 } 3115 3116 skb_reset_mac_header(skb); 3117 skb_set_network_header(skb, skb->len); 3118 iph = skb_put(skb, sizeof(struct ipv6hdr)); 3119 3120 skb_set_transport_header(skb, skb->len); 3121 udph = skb_put(skb, sizeof(struct udphdr)); 3122 skb_set_queue_mapping(skb, queue_map); 3123 skb->priority = pkt_dev->skb_priority; 3124 3125 memcpy(eth, pkt_dev->hh, 12); 3126 *(__be16 *) ð[12] = protocol; 3127 3128 /* Eth + IPh + UDPh + mpls */ 3129 datalen = pkt_dev->cur_pkt_size - 14 - 3130 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 3131 pkt_dev->pkt_overhead; 3132 3133 if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) { 3134 datalen = sizeof(struct pktgen_hdr); 3135 net_info_ratelimited("increased datalen to %d\n", datalen); 3136 } 3137 3138 udplen = datalen + sizeof(struct udphdr); 3139 udph->source = htons(pkt_dev->cur_udp_src); 3140 udph->dest = htons(pkt_dev->cur_udp_dst); 3141 udph->len = htons(udplen); 3142 udph->check = 0; 3143 3144 *(__be32 *) iph = htonl(0x60000000); /* Version + flow */ 3145 3146 if (pkt_dev->traffic_class) { 3147 /* Version + traffic class + flow (0) */ 3148 *(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); 3149 } 3150 3151 iph->hop_limit = 32; 3152 3153 iph->payload_len = htons(udplen); 3154 iph->nexthdr = IPPROTO_UDP; 3155 3156 iph->daddr = pkt_dev->cur_in6_daddr; 3157 iph->saddr = pkt_dev->cur_in6_saddr; 3158 3159 skb->protocol = protocol; 3160 skb->dev = odev; 3161 skb->pkt_type = PACKET_HOST; 3162 3163 pktgen_finalize_skb(pkt_dev, skb, datalen); 3164 3165 if (!(pkt_dev->flags & F_UDPCSUM)) { 3166 skb->ip_summed = CHECKSUM_NONE; 3167 } else if (odev->features & (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM)) { 3168 skb->ip_summed = CHECKSUM_PARTIAL; 3169 skb->csum_start = skb_transport_header(skb) - skb->head; 3170 skb->csum_offset = offsetof(struct udphdr, check); 3171 udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0); 3172 } else { 3173 __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0); 3174 3175 /* add protocol-dependent pseudo-header */ 3176 udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum); 3177 3178 if (udph->check == 0) 3179 udph->check = CSUM_MANGLED_0; 3180 } 3181 3182 return skb; 3183 } 3184 3185 static struct sk_buff *fill_packet(struct net_device *odev, 3186 struct pktgen_dev *pkt_dev) 3187 { 3188 if (pkt_dev->flags & F_IPV6) 3189 return fill_packet_ipv6(odev, pkt_dev); 3190 else 3191 
return fill_packet_ipv4(odev, pkt_dev); 3192 } 3193 3194 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev) 3195 { 3196 pkt_dev->seq_num = 1; 3197 pkt_dev->idle_acc = 0; 3198 pkt_dev->sofar = 0; 3199 pkt_dev->tx_bytes = 0; 3200 pkt_dev->errors = 0; 3201 } 3202 3203 /* Set up structure for sending pkts, clear counters */ 3204 3205 static void pktgen_run(struct pktgen_thread *t) 3206 { 3207 struct pktgen_dev *pkt_dev; 3208 int started = 0; 3209 3210 func_enter(); 3211 3212 rcu_read_lock(); 3213 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { 3214 3215 /* 3216 * setup odev and create initial packet. 3217 */ 3218 pktgen_setup_inject(pkt_dev); 3219 3220 if (pkt_dev->odev) { 3221 pktgen_clear_counters(pkt_dev); 3222 pkt_dev->skb = NULL; 3223 pkt_dev->started_at = pkt_dev->next_tx = ktime_get(); 3224 3225 set_pkt_overhead(pkt_dev); 3226 3227 strscpy(pkt_dev->result, "Starting"); 3228 pkt_dev->running = 1; /* Cranke yeself! */ 3229 started++; 3230 } else 3231 strscpy(pkt_dev->result, "Error starting"); 3232 } 3233 rcu_read_unlock(); 3234 if (started) 3235 t->control &= ~(T_STOP); 3236 } 3237 3238 static void pktgen_handle_all_threads(struct pktgen_net *pn, u32 flags) 3239 { 3240 struct pktgen_thread *t; 3241 3242 mutex_lock(&pktgen_thread_lock); 3243 3244 list_for_each_entry(t, &pn->pktgen_threads, th_list) 3245 t->control |= (flags); 3246 3247 mutex_unlock(&pktgen_thread_lock); 3248 } 3249 3250 static void pktgen_stop_all_threads(struct pktgen_net *pn) 3251 { 3252 func_enter(); 3253 3254 pktgen_handle_all_threads(pn, T_STOP); 3255 } 3256 3257 static int thread_is_running(const struct pktgen_thread *t) 3258 { 3259 const struct pktgen_dev *pkt_dev; 3260 3261 rcu_read_lock(); 3262 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) 3263 if (pkt_dev->running) { 3264 rcu_read_unlock(); 3265 return 1; 3266 } 3267 rcu_read_unlock(); 3268 return 0; 3269 } 3270 3271 static int pktgen_wait_thread_run(struct pktgen_thread *t) 3272 { 3273 while (thread_is_running(t)) { 3274 3275 /* note: 't' will still be around even after the unlock/lock 3276 * cycle because pktgen_thread threads are only cleared at 3277 * net exit 3278 */ 3279 mutex_unlock(&pktgen_thread_lock); 3280 msleep_interruptible(100); 3281 mutex_lock(&pktgen_thread_lock); 3282 3283 if (signal_pending(current)) 3284 goto signal; 3285 } 3286 return 1; 3287 signal: 3288 return 0; 3289 } 3290 3291 static int pktgen_wait_all_threads_run(struct pktgen_net *pn) 3292 { 3293 struct pktgen_thread *t; 3294 int sig = 1; 3295 3296 /* prevent from racing with rmmod */ 3297 if (!try_module_get(THIS_MODULE)) 3298 return sig; 3299 3300 mutex_lock(&pktgen_thread_lock); 3301 3302 list_for_each_entry(t, &pn->pktgen_threads, th_list) { 3303 sig = pktgen_wait_thread_run(t); 3304 if (sig == 0) 3305 break; 3306 } 3307 3308 if (sig == 0) 3309 list_for_each_entry(t, &pn->pktgen_threads, th_list) 3310 t->control |= (T_STOP); 3311 3312 mutex_unlock(&pktgen_thread_lock); 3313 module_put(THIS_MODULE); 3314 return sig; 3315 } 3316 3317 static void pktgen_run_all_threads(struct pktgen_net *pn) 3318 { 3319 func_enter(); 3320 3321 pktgen_handle_all_threads(pn, T_RUN); 3322 3323 /* Propagate thread->control */ 3324 schedule_timeout_interruptible(msecs_to_jiffies(125)); 3325 3326 pktgen_wait_all_threads_run(pn); 3327 } 3328 3329 static void pktgen_reset_all_threads(struct pktgen_net *pn) 3330 { 3331 func_enter(); 3332 3333 pktgen_handle_all_threads(pn, T_REMDEVALL); 3334 3335 /* Propagate thread->control */ 3336 schedule_timeout_interruptible(msecs_to_jiffies(125)); 
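	/* These three helpers back the documented pgctrl commands (see
	 * Documentation/networking/pktgen.rst), roughly:
	 *
	 *   echo start > /proc/net/pktgen/pgctrl   -> pktgen_run_all_threads()
	 *   echo stop  > /proc/net/pktgen/pgctrl   -> pktgen_stop_all_threads()
	 *   echo reset > /proc/net/pktgen/pgctrl   -> pktgen_reset_all_threads()
	 *
	 * As in pktgen_run_all_threads(), the 125 ms sleep above is only a
	 * best-effort settle time for the control flags; the wait below
	 * does the actual synchronization.
	 */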
3337 3338 pktgen_wait_all_threads_run(pn); 3339 } 3340 3341 static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 3342 { 3343 __u64 bps, mbps, pps; 3344 char *p = pkt_dev->result; 3345 ktime_t elapsed = ktime_sub(pkt_dev->stopped_at, 3346 pkt_dev->started_at); 3347 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); 3348 3349 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", 3350 (unsigned long long)ktime_to_us(elapsed), 3351 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), 3352 (unsigned long long)ktime_to_us(idle), 3353 (unsigned long long)pkt_dev->sofar, 3354 pkt_dev->cur_pkt_size, nr_frags); 3355 3356 pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC, 3357 ktime_to_ns(elapsed)); 3358 3359 if (pkt_dev->n_imix_entries > 0) { 3360 int i; 3361 struct imix_pkt *entry; 3362 3363 bps = 0; 3364 for (i = 0; i < pkt_dev->n_imix_entries; i++) { 3365 entry = &pkt_dev->imix_entries[i]; 3366 bps += entry->size * entry->count_so_far; 3367 } 3368 bps = div64_u64(bps * 8 * NSEC_PER_SEC, ktime_to_ns(elapsed)); 3369 } else { 3370 bps = pps * 8 * pkt_dev->cur_pkt_size; 3371 } 3372 3373 mbps = bps; 3374 do_div(mbps, 1000000); 3375 p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu", 3376 (unsigned long long)pps, 3377 (unsigned long long)mbps, 3378 (unsigned long long)bps, 3379 (unsigned long long)pkt_dev->errors); 3380 } 3381 3382 /* Set stopped-at timer, remove from running list, do counters & statistics */ 3383 static int pktgen_stop_device(struct pktgen_dev *pkt_dev) 3384 { 3385 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; 3386 3387 if (!pkt_dev->running) { 3388 pr_warn("interface: %s is already stopped\n", 3389 pkt_dev->odevname); 3390 return -EINVAL; 3391 } 3392 3393 pkt_dev->running = 0; 3394 kfree_skb(pkt_dev->skb); 3395 pkt_dev->skb = NULL; 3396 pkt_dev->stopped_at = ktime_get(); 3397 3398 show_results(pkt_dev, nr_frags); 3399 3400 return 0; 3401 } 3402 3403 static struct pktgen_dev *next_to_run(struct pktgen_thread *t) 3404 { 3405 struct pktgen_dev *pkt_dev, *best = NULL; 3406 3407 rcu_read_lock(); 3408 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { 3409 if (!pkt_dev->running) 3410 continue; 3411 if (best == NULL) 3412 best = pkt_dev; 3413 else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0) 3414 best = pkt_dev; 3415 } 3416 rcu_read_unlock(); 3417 3418 return best; 3419 } 3420 3421 static void pktgen_stop(struct pktgen_thread *t) 3422 { 3423 struct pktgen_dev *pkt_dev; 3424 3425 func_enter(); 3426 3427 rcu_read_lock(); 3428 3429 list_for_each_entry_rcu(pkt_dev, &t->if_list, list) { 3430 pktgen_stop_device(pkt_dev); 3431 } 3432 3433 rcu_read_unlock(); 3434 } 3435 3436 /* 3437 * one of our devices needs to be removed - find it 3438 * and remove it 3439 */ 3440 static void pktgen_rem_one_if(struct pktgen_thread *t) 3441 { 3442 struct list_head *q, *n; 3443 struct pktgen_dev *cur; 3444 3445 func_enter(); 3446 3447 list_for_each_safe(q, n, &t->if_list) { 3448 cur = list_entry(q, struct pktgen_dev, list); 3449 3450 if (!cur->removal_mark) 3451 continue; 3452 3453 kfree_skb(cur->skb); 3454 cur->skb = NULL; 3455 3456 pktgen_remove_device(t, cur); 3457 3458 break; 3459 } 3460 } 3461 3462 static void pktgen_rem_all_ifs(struct pktgen_thread *t) 3463 { 3464 struct list_head *q, *n; 3465 struct pktgen_dev *cur; 3466 3467 func_enter(); 3468 3469 /* Remove all devices, free mem */ 3470 3471 list_for_each_safe(q, n, &t->if_list) { 3472 cur = list_entry(q, struct pktgen_dev, list); 3473 3474 kfree_skb(cur->skb); 3475 cur->skb = 
NULL; 3476 3477 pktgen_remove_device(t, cur); 3478 } 3479 } 3480 3481 static void pktgen_rem_thread(struct pktgen_thread *t) 3482 { 3483 /* Remove from the thread list */ 3484 remove_proc_entry(t->tsk->comm, t->net->proc_dir); 3485 } 3486 3487 static void pktgen_resched(struct pktgen_dev *pkt_dev) 3488 { 3489 ktime_t idle_start = ktime_get(); 3490 3491 schedule(); 3492 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start)); 3493 } 3494 3495 static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev) 3496 { 3497 ktime_t idle_start = ktime_get(); 3498 3499 while (refcount_read(&(pkt_dev->skb->users)) != 1) { 3500 if (signal_pending(current)) 3501 break; 3502 3503 if (need_resched()) 3504 pktgen_resched(pkt_dev); 3505 else 3506 cpu_relax(); 3507 } 3508 pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_get(), idle_start)); 3509 } 3510 3511 static void pktgen_xmit(struct pktgen_dev *pkt_dev) 3512 { 3513 bool skb_shared = !!(READ_ONCE(pkt_dev->flags) & F_SHARED); 3514 struct net_device *odev = pkt_dev->odev; 3515 struct netdev_queue *txq; 3516 unsigned int burst = 1; 3517 struct sk_buff *skb; 3518 int clone_skb = 0; 3519 int ret; 3520 3521 /* If 'skb_shared' is false, the read of possible 3522 * new values (if any) for 'burst' and 'clone_skb' will be skipped to 3523 * prevent some concurrent changes from slipping in. And the stabilized 3524 * config will be read in during the next run of pktgen_xmit. 3525 */ 3526 if (skb_shared) { 3527 burst = READ_ONCE(pkt_dev->burst); 3528 clone_skb = READ_ONCE(pkt_dev->clone_skb); 3529 } 3530 3531 /* If device is offline, then don't send */ 3532 if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) { 3533 pktgen_stop_device(pkt_dev); 3534 return; 3535 } 3536 3537 /* This is max DELAY, this has special meaning of 3538 * "never transmit" 3539 */ 3540 if (unlikely(pkt_dev->delay == ULLONG_MAX)) { 3541 pkt_dev->next_tx = ktime_add_ns(ktime_get(), ULONG_MAX); 3542 return; 3543 } 3544 3545 /* If no skb or clone count exhausted then get new one */ 3546 if (!pkt_dev->skb || (pkt_dev->last_ok && 3547 ++pkt_dev->clone_count >= clone_skb)) { 3548 /* build a new pkt */ 3549 kfree_skb(pkt_dev->skb); 3550 3551 pkt_dev->skb = fill_packet(odev, pkt_dev); 3552 if (pkt_dev->skb == NULL) { 3553 pr_err("ERROR: couldn't allocate skb in fill_packet\n"); 3554 schedule(); 3555 pkt_dev->clone_count--; /* back out increment, OOM */ 3556 return; 3557 } 3558 pkt_dev->last_pkt_size = pkt_dev->skb->len; 3559 pkt_dev->clone_count = 0; /* reset counter */ 3560 } 3561 3562 if (pkt_dev->delay && pkt_dev->last_ok) 3563 spin(pkt_dev, pkt_dev->next_tx); 3564 3565 if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) { 3566 skb = pkt_dev->skb; 3567 skb->protocol = eth_type_trans(skb, skb->dev); 3568 if (skb_shared) 3569 refcount_add(burst, &skb->users); 3570 local_bh_disable(); 3571 do { 3572 ret = netif_receive_skb(skb); 3573 if (ret == NET_RX_DROP) 3574 pkt_dev->errors++; 3575 pkt_dev->sofar++; 3576 pkt_dev->seq_num++; 3577 if (unlikely(!skb_shared)) { 3578 pkt_dev->skb = NULL; 3579 break; 3580 } 3581 if (refcount_read(&skb->users) != burst) { 3582 /* skb was queued by rps/rfs or taps, 3583 * so cannot reuse this skb 3584 */ 3585 WARN_ON(refcount_sub_and_test(burst - 1, &skb->users)); 3586 /* get out of the loop and wait 3587 * until skb is consumed 3588 */ 3589 break; 3590 } 3591 /* skb was 'freed' by stack, so clean few 3592 * bits and reuse it 3593 */ 3594 skb_reset_redirect(skb); 3595 } while (--burst > 0); 3596 goto out; /* Skips xmit_mode M_START_XMIT */ 3597 } else if 
(pkt_dev->xmit_mode == M_QUEUE_XMIT) { 3598 local_bh_disable(); 3599 if (skb_shared) 3600 refcount_inc(&pkt_dev->skb->users); 3601 3602 ret = dev_queue_xmit(pkt_dev->skb); 3603 3604 if (!skb_shared && dev_xmit_complete(ret)) 3605 pkt_dev->skb = NULL; 3606 3607 switch (ret) { 3608 case NET_XMIT_SUCCESS: 3609 pkt_dev->sofar++; 3610 pkt_dev->seq_num++; 3611 pkt_dev->tx_bytes += pkt_dev->last_pkt_size; 3612 break; 3613 case NET_XMIT_DROP: 3614 case NET_XMIT_CN: 3615 /* These are all valid return codes for a qdisc but 3616 * indicate packets are being dropped or will likely 3617 * be dropped soon. 3618 */ 3619 case NETDEV_TX_BUSY: 3620 /* qdisc may call dev_hard_start_xmit directly in cases 3621 * where no queues exist e.g. loopback device, virtual 3622 * devices, etc. In this case we need to handle 3623 * NETDEV_TX_ codes. 3624 */ 3625 default: 3626 pkt_dev->errors++; 3627 net_info_ratelimited("%s xmit error: %d\n", 3628 pkt_dev->odevname, ret); 3629 break; 3630 } 3631 goto out; 3632 } 3633 3634 txq = skb_get_tx_queue(odev, pkt_dev->skb); 3635 3636 local_bh_disable(); 3637 3638 HARD_TX_LOCK(odev, txq, smp_processor_id()); 3639 3640 if (unlikely(netif_xmit_frozen_or_drv_stopped(txq))) { 3641 pkt_dev->last_ok = 0; 3642 goto unlock; 3643 } 3644 if (skb_shared) 3645 refcount_add(burst, &pkt_dev->skb->users); 3646 3647 xmit_more: 3648 ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0); 3649 3650 if (!skb_shared && dev_xmit_complete(ret)) 3651 pkt_dev->skb = NULL; 3652 3653 switch (ret) { 3654 case NETDEV_TX_OK: 3655 pkt_dev->last_ok = 1; 3656 pkt_dev->sofar++; 3657 pkt_dev->seq_num++; 3658 pkt_dev->tx_bytes += pkt_dev->last_pkt_size; 3659 if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq)) 3660 goto xmit_more; 3661 break; 3662 case NET_XMIT_DROP: 3663 case NET_XMIT_CN: 3664 /* skb has been consumed */ 3665 pkt_dev->errors++; 3666 break; 3667 default: /* Drivers are not supposed to return other values! 
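	 * Anything else is logged (ratelimited), counted in
	 * pkt_dev->errors, and falls through to the NETDEV_TX_BUSY
	 * handling so the skb reference taken above is dropped and the
	 * packet retried next time.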
*/ 3668 net_info_ratelimited("%s xmit error: %d\n", 3669 pkt_dev->odevname, ret); 3670 pkt_dev->errors++; 3671 fallthrough; 3672 case NETDEV_TX_BUSY: 3673 /* Retry it next time */ 3674 if (skb_shared) 3675 refcount_dec(&pkt_dev->skb->users); 3676 pkt_dev->last_ok = 0; 3677 } 3678 if (unlikely(burst)) 3679 WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users)); 3680 unlock: 3681 HARD_TX_UNLOCK(odev, txq); 3682 3683 out: 3684 local_bh_enable(); 3685 3686 /* If pkt_dev->count is zero, then run forever */ 3687 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 3688 if (pkt_dev->skb) 3689 pktgen_wait_for_skb(pkt_dev); 3690 3691 /* Done with this */ 3692 pktgen_stop_device(pkt_dev); 3693 } 3694 } 3695 3696 /* 3697 * Main loop of the thread goes here 3698 */ 3699 3700 static int pktgen_thread_worker(void *arg) 3701 { 3702 struct pktgen_thread *t = arg; 3703 struct pktgen_dev *pkt_dev = NULL; 3704 int cpu = t->cpu; 3705 3706 WARN_ON_ONCE(smp_processor_id() != cpu); 3707 3708 init_waitqueue_head(&t->queue); 3709 complete(&t->start_done); 3710 3711 pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); 3712 3713 set_freezable(); 3714 3715 while (!kthread_should_stop()) { 3716 pkt_dev = next_to_run(t); 3717 3718 if (unlikely(!pkt_dev && t->control == 0)) { 3719 if (t->net->pktgen_exiting) 3720 break; 3721 wait_event_freezable_timeout(t->queue, 3722 t->control != 0, HZ / 10); 3723 continue; 3724 } 3725 3726 if (likely(pkt_dev)) { 3727 pktgen_xmit(pkt_dev); 3728 3729 if (need_resched()) 3730 pktgen_resched(pkt_dev); 3731 else 3732 cpu_relax(); 3733 } 3734 3735 if (t->control & T_STOP) { 3736 pktgen_stop(t); 3737 t->control &= ~(T_STOP); 3738 } 3739 3740 if (t->control & T_RUN) { 3741 pktgen_run(t); 3742 t->control &= ~(T_RUN); 3743 } 3744 3745 if (t->control & T_REMDEVALL) { 3746 pktgen_rem_all_ifs(t); 3747 t->control &= ~(T_REMDEVALL); 3748 } 3749 3750 if (t->control & T_REMDEV) { 3751 pktgen_rem_one_if(t); 3752 t->control &= ~(T_REMDEV); 3753 } 3754 3755 try_to_freeze(); 3756 } 3757 3758 pr_debug("%s stopping all device\n", t->tsk->comm); 3759 pktgen_stop(t); 3760 3761 pr_debug("%s removing all device\n", t->tsk->comm); 3762 pktgen_rem_all_ifs(t); 3763 3764 pr_debug("%s removing thread\n", t->tsk->comm); 3765 pktgen_rem_thread(t); 3766 3767 return 0; 3768 } 3769 3770 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 3771 const char *ifname, bool exact) 3772 { 3773 struct pktgen_dev *p, *pkt_dev = NULL; 3774 size_t len = strlen(ifname); 3775 3776 rcu_read_lock(); 3777 list_for_each_entry_rcu(p, &t->if_list, list) 3778 if (strncmp(p->odevname, ifname, len) == 0) { 3779 if (p->odevname[len]) { 3780 if (exact || p->odevname[len] != '@') 3781 continue; 3782 } 3783 pkt_dev = p; 3784 break; 3785 } 3786 3787 rcu_read_unlock(); 3788 pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev); 3789 return pkt_dev; 3790 } 3791 3792 /* 3793 * Adds a dev at front of if_list. 3794 */ 3795 3796 static int add_dev_to_thread(struct pktgen_thread *t, 3797 struct pktgen_dev *pkt_dev) 3798 { 3799 int rv = 0; 3800 3801 /* This function cannot be called concurrently, as its called 3802 * under pktgen_thread_lock mutex, but it can run from 3803 * userspace on another CPU than the kthread. 
The if_lock()
3804  * is used here to sync with concurrent instances of
3805  * _rem_dev_from_if_list() invoked via kthread, which also
3806  * updates the if_list.
3807  */
3808 	if_lock(t);
3809
3810 	if (pkt_dev->pg_thread) {
3811 		pr_err("ERROR: already assigned to a thread\n");
3812 		rv = -EBUSY;
3813 		goto out;
3814 	}
3815
3816 	pkt_dev->running = 0;
3817 	pkt_dev->pg_thread = t;
3818 	list_add_rcu(&pkt_dev->list, &t->if_list);
3819
3820 out:
3821 	if_unlock(t);
3822 	return rv;
3823 }
3824
3825 /* Called under thread lock */
3826
3827 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3828 {
3829 	struct pktgen_dev *pkt_dev;
3830 	int err;
3831 	int node = cpu_to_node(t->cpu);
3832
3833 	/* We don't allow a device to be on several threads */
3834
3835 	pkt_dev = __pktgen_NN_threads(t->net, ifname, FIND);
3836 	if (pkt_dev) {
3837 		pr_err("ERROR: interface already used\n");
3838 		return -EBUSY;
3839 	}
3840
3841 	pkt_dev = kzalloc_node(sizeof(struct pktgen_dev), GFP_KERNEL, node);
3842 	if (!pkt_dev)
3843 		return -ENOMEM;
3844
3845 	strscpy(pkt_dev->odevname, ifname);
3846 	pkt_dev->flows = vzalloc_node(array_size(MAX_CFLOWS,
3847 						 sizeof(struct flow_state)),
3848 				      node);
3849 	if (pkt_dev->flows == NULL) {
3850 		kfree(pkt_dev);
3851 		return -ENOMEM;
3852 	}
3853
3854 	pkt_dev->removal_mark = 0;
3855 	pkt_dev->nfrags = 0;
3856 	pkt_dev->delay = pg_delay_d;
3857 	pkt_dev->count = pg_count_d;
3858 	pkt_dev->sofar = 0;
3859 	pkt_dev->udp_src_min = 9;	/* sink port */
3860 	pkt_dev->udp_src_max = 9;
3861 	pkt_dev->udp_dst_min = 9;
3862 	pkt_dev->udp_dst_max = 9;
3863 	pkt_dev->vlan_p = 0;
3864 	pkt_dev->vlan_cfi = 0;
3865 	pkt_dev->vlan_id = 0xffff;
3866 	pkt_dev->svlan_p = 0;
3867 	pkt_dev->svlan_cfi = 0;
3868 	pkt_dev->svlan_id = 0xffff;
3869 	pkt_dev->burst = 1;
3870 	pkt_dev->node = NUMA_NO_NODE;
3871 	pkt_dev->flags = F_SHARED;	/* SKB shared by default */
3872
3873 	err = pktgen_setup_dev(t->net, pkt_dev, ifname);
3874 	if (err)
3875 		goto out1;
3876 	if (pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)
3877 		pkt_dev->clone_skb = pg_clone_skb_d;
3878
3879 	pkt_dev->entry = proc_create_data(ifname, 0600, t->net->proc_dir,
3880 					  &pktgen_if_proc_ops, pkt_dev);
3881 	if (!pkt_dev->entry) {
3882 		pr_err("cannot create %s/%s procfs entry\n",
3883 		       PG_PROC_DIR, ifname);
3884 		err = -EINVAL;
3885 		goto out2;
3886 	}
3887 #ifdef CONFIG_XFRM
3888 	pkt_dev->ipsmode = XFRM_MODE_TRANSPORT;
3889 	pkt_dev->ipsproto = IPPROTO_ESP;
3890
3891 	/* xfrm tunnel mode needs an additional dst to extract the outer
3892 	 * IP header protocol/ttl/id fields, so create a phony one here
3893 	 * instead of looking up a valid rt, which would definitely hurt
3894 	 * performance in this situation.
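	 * Only the minimum xfrm_dst state is populated below: an address
	 * family for dstops, the output device, static metrics and a
	 * self-referential child pointer.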
3895 	 */
3896 	pkt_dev->dstops.family = AF_INET;
3897 	pkt_dev->xdst.u.dst.dev = pkt_dev->odev;
3898 	dst_init_metrics(&pkt_dev->xdst.u.dst, pktgen_dst_metrics, false);
3899 	pkt_dev->xdst.child = &pkt_dev->xdst.u.dst;
3900 	pkt_dev->xdst.u.dst.ops = &pkt_dev->dstops;
3901 #endif
3902
3903 	return add_dev_to_thread(t, pkt_dev);
3904 out2:
3905 	netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker);
3906 out1:
3907 #ifdef CONFIG_XFRM
3908 	free_SAs(pkt_dev);
3909 #endif
3910 	vfree(pkt_dev->flows);
3911 	kfree(pkt_dev);
3912 	return err;
3913 }
3914
3915 static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
3916 {
3917 	struct pktgen_thread *t;
3918 	struct proc_dir_entry *pe;
3919 	struct task_struct *p;
3920
3921 	t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
3922 			 cpu_to_node(cpu));
3923 	if (!t) {
3924 		pr_err("ERROR: out of memory, can't create new thread\n");
3925 		return -ENOMEM;
3926 	}
3927
3928 	mutex_init(&t->if_lock);
3929 	t->cpu = cpu;
3930
3931 	INIT_LIST_HEAD(&t->if_list);
3932
3933 	list_add_tail(&t->th_list, &pn->pktgen_threads);
3934 	init_completion(&t->start_done);
3935
3936 	p = kthread_create_on_cpu(pktgen_thread_worker, t, cpu, "kpktgend_%d");
3937 	if (IS_ERR(p)) {
3938 		pr_err("kthread_create_on_cpu() failed for cpu %d\n", t->cpu);
3939 		list_del(&t->th_list);
3940 		kfree(t);
3941 		return PTR_ERR(p);
3942 	}
3943
3944 	t->tsk = p;
3945
3946 	pe = proc_create_data(t->tsk->comm, 0600, pn->proc_dir,
3947 			      &pktgen_thread_proc_ops, t);
3948 	if (!pe) {
3949 		pr_err("cannot create %s/%s procfs entry\n",
3950 		       PG_PROC_DIR, t->tsk->comm);
3951 		kthread_stop(p);
3952 		list_del(&t->th_list);
3953 		kfree(t);
3954 		return -EINVAL;
3955 	}
3956
3957 	t->net = pn;
3958 	get_task_struct(p);
3959 	wake_up_process(p);
3960 	wait_for_completion(&t->start_done);
3961
3962 	return 0;
3963 }
3964
3965 /*
3966  * Removes a device from the thread if_list.
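  * Removal is RCU-aware: entries are unlinked with list_del_rcu() under
  * if_lock, while the pktgen_dev itself is only freed later via
  * kfree_rcu() in pktgen_remove_device().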
3967 */ 3968 static void _rem_dev_from_if_list(struct pktgen_thread *t, 3969 struct pktgen_dev *pkt_dev) 3970 { 3971 struct list_head *q, *n; 3972 struct pktgen_dev *p; 3973 3974 if_lock(t); 3975 list_for_each_safe(q, n, &t->if_list) { 3976 p = list_entry(q, struct pktgen_dev, list); 3977 if (p == pkt_dev) 3978 list_del_rcu(&p->list); 3979 } 3980 if_unlock(t); 3981 } 3982 3983 static int pktgen_remove_device(struct pktgen_thread *t, 3984 struct pktgen_dev *pkt_dev) 3985 { 3986 pr_debug("remove_device pkt_dev=%p\n", pkt_dev); 3987 3988 if (pkt_dev->running) { 3989 pr_warn("WARNING: trying to remove a running interface, stopping it now\n"); 3990 pktgen_stop_device(pkt_dev); 3991 } 3992 3993 /* Dis-associate from the interface */ 3994 3995 if (pkt_dev->odev) { 3996 netdev_put(pkt_dev->odev, &pkt_dev->dev_tracker); 3997 pkt_dev->odev = NULL; 3998 } 3999 4000 /* Remove proc before if_list entry, because add_device uses 4001 * list to determine if interface already exist, avoid race 4002 * with proc_create_data() 4003 */ 4004 proc_remove(pkt_dev->entry); 4005 4006 /* And update the thread if_list */ 4007 _rem_dev_from_if_list(t, pkt_dev); 4008 4009 #ifdef CONFIG_XFRM 4010 free_SAs(pkt_dev); 4011 #endif 4012 vfree(pkt_dev->flows); 4013 if (pkt_dev->page) 4014 put_page(pkt_dev->page); 4015 kfree_rcu(pkt_dev, rcu); 4016 return 0; 4017 } 4018 4019 static int __net_init pg_net_init(struct net *net) 4020 { 4021 struct pktgen_net *pn = net_generic(net, pg_net_id); 4022 struct proc_dir_entry *pe; 4023 int cpu, ret = 0; 4024 4025 pn->net = net; 4026 INIT_LIST_HEAD(&pn->pktgen_threads); 4027 pn->pktgen_exiting = false; 4028 pn->proc_dir = proc_mkdir(PG_PROC_DIR, pn->net->proc_net); 4029 if (!pn->proc_dir) { 4030 pr_warn("cannot create /proc/net/%s\n", PG_PROC_DIR); 4031 return -ENODEV; 4032 } 4033 pe = proc_create(PGCTRL, 0600, pn->proc_dir, &pktgen_proc_ops); 4034 if (pe == NULL) { 4035 pr_err("cannot create %s procfs entry\n", PGCTRL); 4036 ret = -EINVAL; 4037 goto remove; 4038 } 4039 4040 cpus_read_lock(); 4041 for_each_online_cpu(cpu) { 4042 int err; 4043 4044 err = pktgen_create_thread(cpu, pn); 4045 if (err) 4046 pr_warn("Cannot create thread for cpu %d (%d)\n", 4047 cpu, err); 4048 } 4049 cpus_read_unlock(); 4050 4051 if (list_empty(&pn->pktgen_threads)) { 4052 pr_err("Initialization failed for all threads\n"); 4053 ret = -ENODEV; 4054 goto remove_entry; 4055 } 4056 4057 return 0; 4058 4059 remove_entry: 4060 remove_proc_entry(PGCTRL, pn->proc_dir); 4061 remove: 4062 remove_proc_entry(PG_PROC_DIR, pn->net->proc_net); 4063 return ret; 4064 } 4065 4066 static void __net_exit pg_net_exit(struct net *net) 4067 { 4068 struct pktgen_net *pn = net_generic(net, pg_net_id); 4069 struct pktgen_thread *t; 4070 struct list_head *q, *n; 4071 LIST_HEAD(list); 4072 4073 /* Stop all interfaces & threads */ 4074 pn->pktgen_exiting = true; 4075 4076 mutex_lock(&pktgen_thread_lock); 4077 list_splice_init(&pn->pktgen_threads, &list); 4078 mutex_unlock(&pktgen_thread_lock); 4079 4080 list_for_each_safe(q, n, &list) { 4081 t = list_entry(q, struct pktgen_thread, th_list); 4082 list_del(&t->th_list); 4083 kthread_stop_put(t->tsk); 4084 kfree(t); 4085 } 4086 4087 remove_proc_entry(PGCTRL, pn->proc_dir); 4088 remove_proc_entry(PG_PROC_DIR, pn->net->proc_net); 4089 } 4090 4091 static struct pernet_operations pg_net_ops = { 4092 .init = pg_net_init, 4093 .exit = pg_net_exit, 4094 .id = &pg_net_id, 4095 .size = sizeof(struct pktgen_net), 4096 }; 4097 4098 static int __init pg_init(void) 4099 { 4100 int ret = 0; 4101 4102 
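	/* Register the pernet subsystem before the netdevice notifier so
	 * per-net state exists by the time events can arrive; pg_cleanup()
	 * unwinds the two in reverse order.
	 */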
pr_info("%s", version); 4103 ret = register_pernet_subsys(&pg_net_ops); 4104 if (ret) 4105 return ret; 4106 ret = register_netdevice_notifier(&pktgen_notifier_block); 4107 if (ret) 4108 unregister_pernet_subsys(&pg_net_ops); 4109 4110 return ret; 4111 } 4112 4113 static void __exit pg_cleanup(void) 4114 { 4115 unregister_netdevice_notifier(&pktgen_notifier_block); 4116 unregister_pernet_subsys(&pg_net_ops); 4117 /* Don't need rcu_barrier() due to use of kfree_rcu() */ 4118 } 4119 4120 module_init(pg_init); 4121 module_exit(pg_cleanup); 4122 4123 MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>"); 4124 MODULE_DESCRIPTION("Packet Generator tool"); 4125 MODULE_LICENSE("GPL"); 4126 MODULE_VERSION(VERSION); 4127 module_param(pg_count_d, int, 0); 4128 MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject"); 4129 module_param(pg_delay_d, int, 0); 4130 MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)"); 4131 module_param(pg_clone_skb_d, int, 0); 4132 MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet"); 4133 module_param(debug, int, 0); 4134 MODULE_PARM_DESC(debug, "Enable debugging of pktgen module"); 4135