/*
 * Authors:
 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
 *                             Uppsala University and
 *                             Swedish University of Agricultural Sciences
 *
 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
 * Ben Greear <greearb@candelatech.com>
 * Jens Låås <jens.laas@data.slu.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *
 * A tool for loading the network with preconfigured packets.
 * The tool is implemented as a Linux module.  Parameters are output
 * device, delay (to hard_xmit), number of packets, and whether
 * to use multiple SKBs or just the same one.
 * pktgen uses the installed interface's output routine.
 *
 * Additional hacking by:
 *
 * Jens.Laas@data.slu.se
 * Improved by ANK. 010120.
 * Improved by ANK even more. 010212.
 * MAC address typo fixed. 010417 --ro
 * Integrated. 020301 --DaveM
 * Added multiskb option 020301 --DaveM
 * Scaling of results. 020417 --sigurdur@linpro.no
 * Significant re-work of the module:
 *   * Convert to threaded model to more efficiently be able to transmit
 *     and receive on multiple interfaces at once.
 *   * Converted many counters to __u64 to allow longer runs.
 *   * Allow configuration of ranges, like min/max IP address, MACs,
 *     and UDP-ports, for both source and destination, and can
 *     set to use a random distribution or sequentially walk the range.
 *   * Can now change most values after starting.
 *   * Place 12-byte packet in UDP payload with magic number,
 *     sequence number, and timestamp.
 *   * Add receiver code that detects dropped pkts, re-ordered pkts, and
 *     latencies (with micro-second precision).
 *   * Add IOCTL interface to easily get counters & configuration.
 *   --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up the sending core for two
 * distinct skb modes.  clone_skb=0 is used for Ben's "ranges" work, while
 * clone_skb != 0 acts as a "fastpath" with a configurable number of clones
 * after each allocation.  With clone_skb=0 every packet is allocated fresh,
 * so ranges, time stamps etc. can be used; clone_skb=100 means one
 * allocation is followed by 100 clones.
 *
 * Also moved to /proc/net/pktgen/
 * --ro
 *
 * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
 *           mistakes.  Also merged in DaveM's patch in the -pre6 patch.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.txt for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start.  This thread checks
 * for running devices in its if_list and sends packets until count reaches 0.
 * It also checks thread->control, which is used for inter-process
 * communication; the controlling process "posts" operations to the threads
 * this way.  The if_lock should be possible to remove when add/rem_device is
 * merged into this too.
 *
 * By design there should only be *one* "controlling" process.  In practice
 * multiple writers give unpredictable results.  Note that a "write" to /proc
 * produces a result code that should be read back by the "writer".
 * For practical use this should be no problem.
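 *
 * Rough usage sketch (see Documentation/networking/pktgen.txt for the
 * authoritative walk-through; the per-thread file name shown below,
 * e.g. kpktgend_0, depends on the running kernel and is only illustrative):
 *
 *   echo "add_device eth0"            > /proc/net/pktgen/kpktgend_0
 *   echo "count 100000"               > /proc/net/pktgen/eth0
 *   echo "pkt_size 60"                > /proc/net/pktgen/eth0
 *   echo "dst 10.0.0.1"               > /proc/net/pktgen/eth0
 *   echo "dst_mac 00:11:22:33:44:55"  > /proc/net/pktgen/eth0
 *   echo "start"                      > /proc/net/pktgen/pgctrl
 *   cat /proc/net/pktgen/eth0         # read back the "Result:" line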
 *
 * Note: when adding devices to a specific CPU it is a good idea to also
 * assign /proc/irq/XX/smp_affinity so TX interrupts get bound to the same
 * CPU.
 * --ro
 *
 * Fix refcount off by one if first packet fails, potential null deref,
 * memleak 030710- KJP
 *
 * First "ranges" functionality for ipv6 030726 --ro
 *
 * Included flow support. 030802 ANK.
 *
 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
 *
 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
 *
 * New xmit() return, do_div and misc clean up by Stephen Hemminger
 * <shemminger@osdl.org> 040923
 *
 * Randy Dunlap fixed u64 printk compiler warning
 *
 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
 * New time handling.  Lennert Buytenhek <buytenh@wantstofly.org> 041213
 *
 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 *
 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com>
 * 050103
 *
 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 *
 */
#include <linux/sys.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/wait.h>
#include <linux/etherdevice.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/div64.h>		/* do_div */
#include <asm/timex.h>

#define VERSION "pktgen v2.67: Packet Generator for packet performance testing.\n"

/* #define PG_DEBUG(a) a */
#define PG_DEBUG(a)

/* The buckets are exponential in 'width' */
#define LAT_BUCKETS_MAX		32
#define IP_NAME_SZ		32
#define MAX_MPLS_LABELS		16	/* This is the max label stack depth */
#define MPLS_STACK_BOTTOM	__constant_htonl(0x00000100)

/* Device flag bits */
#define F_IPSRC_RND	(1<<0)	/* IP-Src Random */
#define F_IPDST_RND	(1<<1)	/* IP-Dst Random */
#define F_UDPSRC_RND	(1<<2)	/* UDP-Src Random */
#define F_UDPDST_RND	(1<<3)	/* UDP-Dst Random */
#define F_MACSRC_RND	(1<<4)	/* MAC-Src Random */
#define F_MACDST_RND	(1<<5)	/* MAC-Dst Random */
#define F_TXSIZE_RND	(1<<6)	/* Transmit size is random */
#define F_IPV6		(1<<7)	/* Interface in IPV6 Mode */
#define F_MPLS_RND	(1<<8)	/*
Random MPLS labels */ 181 182 /* Thread control flag bits */ 183 #define T_TERMINATE (1<<0) 184 #define T_STOP (1<<1) /* Stop run */ 185 #define T_RUN (1<<2) /* Start run */ 186 #define T_REMDEVALL (1<<3) /* Remove all devs */ 187 #define T_REMDEV (1<<4) /* Remove one dev */ 188 189 /* If lock -- can be removed after some work */ 190 #define if_lock(t) spin_lock(&(t->if_lock)); 191 #define if_unlock(t) spin_unlock(&(t->if_lock)); 192 193 /* Used to help with determining the pkts on receive */ 194 #define PKTGEN_MAGIC 0xbe9be955 195 #define PG_PROC_DIR "pktgen" 196 #define PGCTRL "pgctrl" 197 static struct proc_dir_entry *pg_proc_dir = NULL; 198 199 #define MAX_CFLOWS 65536 200 201 struct flow_state { 202 __u32 cur_daddr; 203 int count; 204 }; 205 206 struct pktgen_dev { 207 208 /* 209 * Try to keep frequent/infrequent used vars. separated. 210 */ 211 212 char ifname[IFNAMSIZ]; 213 char result[512]; 214 215 struct pktgen_thread *pg_thread; /* the owner */ 216 struct list_head list; /* Used for chaining in the thread's run-queue */ 217 218 int running; /* if this changes to false, the test will stop */ 219 220 /* If min != max, then we will either do a linear iteration, or 221 * we will do a random selection from within the range. 222 */ 223 __u32 flags; 224 int removal_mark; /* non-zero => the device is marked for 225 * removal by worker thread */ 226 227 int min_pkt_size; /* = ETH_ZLEN; */ 228 int max_pkt_size; /* = ETH_ZLEN; */ 229 int nfrags; 230 __u32 delay_us; /* Default delay */ 231 __u32 delay_ns; 232 __u64 count; /* Default No packets to send */ 233 __u64 sofar; /* How many pkts we've sent so far */ 234 __u64 tx_bytes; /* How many bytes we've transmitted */ 235 __u64 errors; /* Errors when trying to transmit, pkts will be re-sent */ 236 237 /* runtime counters relating to clone_skb */ 238 __u64 next_tx_us; /* timestamp of when to tx next */ 239 __u32 next_tx_ns; 240 241 __u64 allocated_skbs; 242 __u32 clone_count; 243 int last_ok; /* Was last skb sent? 244 * Or a failed transmit of some sort? This will keep 245 * sequence numbers in order, for example. 246 */ 247 __u64 started_at; /* micro-seconds */ 248 __u64 stopped_at; /* micro-seconds */ 249 __u64 idle_acc; /* micro-seconds */ 250 __u32 seq_num; 251 252 int clone_skb; /* Use multiple SKBs during packet gen. If this number 253 * is greater than 1, then that many copies of the same 254 * packet will be sent before a new packet is allocated. 255 * For instance, if you want to send 1024 identical packets 256 * before creating a new packet, set clone_skb to 1024. 257 */ 258 259 char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 260 char dst_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 261 char src_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 262 char src_max[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */ 263 264 struct in6_addr in6_saddr; 265 struct in6_addr in6_daddr; 266 struct in6_addr cur_in6_daddr; 267 struct in6_addr cur_in6_saddr; 268 /* For ranges */ 269 struct in6_addr min_in6_daddr; 270 struct in6_addr max_in6_daddr; 271 struct in6_addr min_in6_saddr; 272 struct in6_addr max_in6_saddr; 273 274 /* If we're doing ranges, random or incremental, then this 275 * defines the min/max for those ranges. 
 */
	__u32 saddr_min;	/* inclusive, source IP address */
	__u32 saddr_max;	/* exclusive, source IP address */
	__u32 daddr_min;	/* inclusive, dest IP address */
	__u32 daddr_max;	/* exclusive, dest IP address */

	__u16 udp_src_min;	/* inclusive, source UDP port */
	__u16 udp_src_max;	/* exclusive, source UDP port */
	__u16 udp_dst_min;	/* inclusive, dest UDP port */
	__u16 udp_dst_max;	/* exclusive, dest UDP port */

	/* MPLS */
	unsigned nr_labels;	/* Depth of stack, 0 = no MPLS */
	__be32 labels[MAX_MPLS_LABELS];

	__u32 src_mac_count;	/* How many MACs to iterate through */
	__u32 dst_mac_count;	/* How many MACs to iterate through */

	unsigned char dst_mac[ETH_ALEN];
	unsigned char src_mac[ETH_ALEN];

	__u32 cur_dst_mac_offset;
	__u32 cur_src_mac_offset;
	__u32 cur_saddr;
	__u32 cur_daddr;
	__u16 cur_udp_dst;
	__u16 cur_udp_src;
	__u32 cur_pkt_size;

	__u8 hh[14];
	/* = {
	   0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,

	   We fill in SRC address later
	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	   0x08, 0x00
	   };
	 */
	__u16 pad;		/* pad out the hh struct to an even 16 bytes */

	struct sk_buff *skb;	/* skb we are to transmit next, mainly used for when we
				 * are transmitting the same one multiple times
				 */
	struct net_device *odev;	/* The out-going device.  Note that the device should
					 * have its pg_info pointer pointing back to this
					 * device.  This will be set when the user specifies
					 * the out-going device name (not when the inject is
					 * started, as it used to do.)
					 */
	struct flow_state *flows;
	unsigned cflows;	/* Concurrent flows (config) */
	unsigned lflow;		/* Flow length (config) */
	unsigned nflows;	/* accumulated flows (stats) */
};

struct pktgen_hdr {
	__u32 pgh_magic;
	__u32 seq_num;
	__u32 tv_sec;
	__u32 tv_usec;
};

struct pktgen_thread {
	spinlock_t if_lock;
	struct list_head if_list;	/* All devices here */
	struct list_head th_list;
	int removed;
	char name[32];
	char result[512];
	u32 max_before_softirq;	/* We'll call do_softirq to prevent starvation. */

	/* Field for the thread to receive "posted" events: terminate, stop ifs, etc. */

	u32 control;
	int pid;
	int cpu;

	wait_queue_head_t queue;
};

#define REMOVE 1
#define FIND   0

/* This code works around the fact that do_div cannot handle two 64-bit
   numbers, and regular 64-bit division doesn't work on x86 kernels.
   --Ben
*/

#define PG_DIV 0

/* This was emailed to LKML by: Chris Caputo <ccaputo@alt.net>
 * Function copied/adapted/optimized from:
 *
 *  nemesis.sourceforge.net/browse/lib/static/intmath/ix86/intmath.c.html
 *
 * Copyright 1994, University of Cambridge Computer Laboratory
 * All Rights Reserved.
 *
 */
static inline s64 divremdi3(s64 x, s64 y, int type)
{
	u64 a = (x < 0) ? -x : x;
	u64 b = (y < 0) ? -y : y;
	u64 res = 0, d = 1;

	if (b > 0) {
		while (b < a) {
			b <<= 1;
			d <<= 1;
		}
	}

	do {
		if (a >= b) {
			a -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	}
	while (d);

	if (PG_DIV == type) {
		return (((x ^ y) & (1ll << 63)) == 0) ? res : -(s64) res;
	} else {
		return ((x & (1ll << 63)) == 0) ?
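		       /* remainder case: the result keeps the sign of the dividend x */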
a : -(s64) a; 402 } 403 } 404 405 /* End of hacks to deal with 64-bit math on x86 */ 406 407 /** Convert to milliseconds */ 408 static inline __u64 tv_to_ms(const struct timeval *tv) 409 { 410 __u64 ms = tv->tv_usec / 1000; 411 ms += (__u64) tv->tv_sec * (__u64) 1000; 412 return ms; 413 } 414 415 /** Convert to micro-seconds */ 416 static inline __u64 tv_to_us(const struct timeval *tv) 417 { 418 __u64 us = tv->tv_usec; 419 us += (__u64) tv->tv_sec * (__u64) 1000000; 420 return us; 421 } 422 423 static inline __u64 pg_div(__u64 n, __u32 base) 424 { 425 __u64 tmp = n; 426 do_div(tmp, base); 427 /* printk("pktgen: pg_div, n: %llu base: %d rv: %llu\n", 428 n, base, tmp); */ 429 return tmp; 430 } 431 432 static inline __u64 pg_div64(__u64 n, __u64 base) 433 { 434 __u64 tmp = n; 435 /* 436 * How do we know if the architecture we are running on 437 * supports division with 64 bit base? 438 * 439 */ 440 #if defined(__sparc_v9__) || defined(__powerpc64__) || defined(__alpha__) || defined(__x86_64__) || defined(__ia64__) 441 442 do_div(tmp, base); 443 #else 444 tmp = divremdi3(n, base, PG_DIV); 445 #endif 446 return tmp; 447 } 448 449 static inline u32 pktgen_random(void) 450 { 451 #if 0 452 __u32 n; 453 get_random_bytes(&n, 4); 454 return n; 455 #else 456 return net_random(); 457 #endif 458 } 459 460 static inline __u64 getCurMs(void) 461 { 462 struct timeval tv; 463 do_gettimeofday(&tv); 464 return tv_to_ms(&tv); 465 } 466 467 static inline __u64 getCurUs(void) 468 { 469 struct timeval tv; 470 do_gettimeofday(&tv); 471 return tv_to_us(&tv); 472 } 473 474 static inline __u64 tv_diff(const struct timeval *a, const struct timeval *b) 475 { 476 return tv_to_us(a) - tv_to_us(b); 477 } 478 479 /* old include end */ 480 481 static char version[] __initdata = VERSION; 482 483 static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 484 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 485 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 486 const char *ifname); 487 static int pktgen_device_event(struct notifier_block *, unsigned long, void *); 488 static void pktgen_run_all_threads(void); 489 static void pktgen_stop_all_threads_ifs(void); 490 static int pktgen_stop_device(struct pktgen_dev *pkt_dev); 491 static void pktgen_stop(struct pktgen_thread *t); 492 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 493 static int pktgen_mark_device(const char *ifname); 494 static unsigned int scan_ip6(const char *s, char ip[16]); 495 static unsigned int fmt_ip6(char *s, const char ip[16]); 496 497 /* Module parameters, defaults. 
*/ 498 static int pg_count_d = 1000; /* 1000 pkts by default */ 499 static int pg_delay_d; 500 static int pg_clone_skb_d; 501 static int debug; 502 503 static DEFINE_MUTEX(pktgen_thread_lock); 504 static LIST_HEAD(pktgen_threads); 505 506 static struct notifier_block pktgen_notifier_block = { 507 .notifier_call = pktgen_device_event, 508 }; 509 510 /* 511 * /proc handling functions 512 * 513 */ 514 515 static int pgctrl_show(struct seq_file *seq, void *v) 516 { 517 seq_puts(seq, VERSION); 518 return 0; 519 } 520 521 static ssize_t pgctrl_write(struct file *file, const char __user * buf, 522 size_t count, loff_t * ppos) 523 { 524 int err = 0; 525 char data[128]; 526 527 if (!capable(CAP_NET_ADMIN)) { 528 err = -EPERM; 529 goto out; 530 } 531 532 if (count > sizeof(data)) 533 count = sizeof(data); 534 535 if (copy_from_user(data, buf, count)) { 536 err = -EFAULT; 537 goto out; 538 } 539 data[count - 1] = 0; /* Make string */ 540 541 if (!strcmp(data, "stop")) 542 pktgen_stop_all_threads_ifs(); 543 544 else if (!strcmp(data, "start")) 545 pktgen_run_all_threads(); 546 547 else 548 printk("pktgen: Unknown command: %s\n", data); 549 550 err = count; 551 552 out: 553 return err; 554 } 555 556 static int pgctrl_open(struct inode *inode, struct file *file) 557 { 558 return single_open(file, pgctrl_show, PDE(inode)->data); 559 } 560 561 static struct file_operations pktgen_fops = { 562 .owner = THIS_MODULE, 563 .open = pgctrl_open, 564 .read = seq_read, 565 .llseek = seq_lseek, 566 .write = pgctrl_write, 567 .release = single_release, 568 }; 569 570 static int pktgen_if_show(struct seq_file *seq, void *v) 571 { 572 int i; 573 struct pktgen_dev *pkt_dev = seq->private; 574 __u64 sa; 575 __u64 stopped; 576 __u64 now = getCurUs(); 577 578 seq_printf(seq, 579 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 580 (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size, 581 pkt_dev->max_pkt_size); 582 583 seq_printf(seq, 584 " frags: %d delay: %u clone_skb: %d ifname: %s\n", 585 pkt_dev->nfrags, 586 1000 * pkt_dev->delay_us + pkt_dev->delay_ns, 587 pkt_dev->clone_skb, pkt_dev->ifname); 588 589 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 590 pkt_dev->lflow); 591 592 if (pkt_dev->flags & F_IPV6) { 593 char b1[128], b2[128], b3[128]; 594 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); 595 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); 596 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); 597 seq_printf(seq, 598 " saddr: %s min_saddr: %s max_saddr: %s\n", b1, 599 b2, b3); 600 601 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); 602 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); 603 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); 604 seq_printf(seq, 605 " daddr: %s min_daddr: %s max_daddr: %s\n", b1, 606 b2, b3); 607 608 } else 609 seq_printf(seq, 610 " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", 611 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, 612 pkt_dev->src_max); 613 614 seq_puts(seq, " src_mac: "); 615 616 if (is_zero_ether_addr(pkt_dev->src_mac)) 617 for (i = 0; i < 6; i++) 618 seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], 619 i == 5 ? " " : ":"); 620 else 621 for (i = 0; i < 6; i++) 622 seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], 623 i == 5 ? " " : ":"); 624 625 seq_printf(seq, "dst_mac: "); 626 for (i = 0; i < 6; i++) 627 seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], 628 i == 5 ? 
"\n" : ":"); 629 630 seq_printf(seq, 631 " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", 632 pkt_dev->udp_src_min, pkt_dev->udp_src_max, 633 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); 634 635 seq_printf(seq, 636 " src_mac_count: %d dst_mac_count: %d\n", 637 pkt_dev->src_mac_count, pkt_dev->dst_mac_count); 638 639 if (pkt_dev->nr_labels) { 640 unsigned i; 641 seq_printf(seq, " mpls: "); 642 for(i = 0; i < pkt_dev->nr_labels; i++) 643 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 644 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 645 } 646 647 seq_printf(seq, " Flags: "); 648 649 if (pkt_dev->flags & F_IPV6) 650 seq_printf(seq, "IPV6 "); 651 652 if (pkt_dev->flags & F_IPSRC_RND) 653 seq_printf(seq, "IPSRC_RND "); 654 655 if (pkt_dev->flags & F_IPDST_RND) 656 seq_printf(seq, "IPDST_RND "); 657 658 if (pkt_dev->flags & F_TXSIZE_RND) 659 seq_printf(seq, "TXSIZE_RND "); 660 661 if (pkt_dev->flags & F_UDPSRC_RND) 662 seq_printf(seq, "UDPSRC_RND "); 663 664 if (pkt_dev->flags & F_UDPDST_RND) 665 seq_printf(seq, "UDPDST_RND "); 666 667 if (pkt_dev->flags & F_MPLS_RND) 668 seq_printf(seq, "MPLS_RND "); 669 670 if (pkt_dev->flags & F_MACSRC_RND) 671 seq_printf(seq, "MACSRC_RND "); 672 673 if (pkt_dev->flags & F_MACDST_RND) 674 seq_printf(seq, "MACDST_RND "); 675 676 seq_puts(seq, "\n"); 677 678 sa = pkt_dev->started_at; 679 stopped = pkt_dev->stopped_at; 680 if (pkt_dev->running) 681 stopped = now; /* not really stopped, more like last-running-at */ 682 683 seq_printf(seq, 684 "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", 685 (unsigned long long)pkt_dev->sofar, 686 (unsigned long long)pkt_dev->errors, (unsigned long long)sa, 687 (unsigned long long)stopped, 688 (unsigned long long)pkt_dev->idle_acc); 689 690 seq_printf(seq, 691 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", 692 pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, 693 pkt_dev->cur_src_mac_offset); 694 695 if (pkt_dev->flags & F_IPV6) { 696 char b1[128], b2[128]; 697 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); 698 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); 699 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1); 700 } else 701 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", 702 pkt_dev->cur_saddr, pkt_dev->cur_daddr); 703 704 seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", 705 pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); 706 707 seq_printf(seq, " flows: %u\n", pkt_dev->nflows); 708 709 if (pkt_dev->result[0]) 710 seq_printf(seq, "Result: %s\n", pkt_dev->result); 711 else 712 seq_printf(seq, "Result: Idle\n"); 713 714 return 0; 715 } 716 717 718 static int hex32_arg(const char __user *user_buffer, __u32 *num) 719 { 720 int i = 0; 721 *num = 0; 722 723 for(; i < 8; i++) { 724 char c; 725 *num <<= 4; 726 if (get_user(c, &user_buffer[i])) 727 return -EFAULT; 728 if ((c >= '0') && (c <= '9')) 729 *num |= c - '0'; 730 else if ((c >= 'a') && (c <= 'f')) 731 *num |= c - 'a' + 10; 732 else if ((c >= 'A') && (c <= 'F')) 733 *num |= c - 'A' + 10; 734 else 735 break; 736 } 737 return i; 738 } 739 740 static int count_trail_chars(const char __user * user_buffer, 741 unsigned int maxlen) 742 { 743 int i; 744 745 for (i = 0; i < maxlen; i++) { 746 char c; 747 if (get_user(c, &user_buffer[i])) 748 return -EFAULT; 749 switch (c) { 750 case '\"': 751 case '\n': 752 case '\r': 753 case '\t': 754 case ' ': 755 case '=': 756 break; 757 default: 758 goto done; 759 }; 760 } 761 done: 762 return i; 763 } 764 765 static unsigned long num_arg(const char 
__user * user_buffer, 766 unsigned long maxlen, unsigned long *num) 767 { 768 int i = 0; 769 *num = 0; 770 771 for (; i < maxlen; i++) { 772 char c; 773 if (get_user(c, &user_buffer[i])) 774 return -EFAULT; 775 if ((c >= '0') && (c <= '9')) { 776 *num *= 10; 777 *num += c - '0'; 778 } else 779 break; 780 } 781 return i; 782 } 783 784 static int strn_len(const char __user * user_buffer, unsigned int maxlen) 785 { 786 int i = 0; 787 788 for (; i < maxlen; i++) { 789 char c; 790 if (get_user(c, &user_buffer[i])) 791 return -EFAULT; 792 switch (c) { 793 case '\"': 794 case '\n': 795 case '\r': 796 case '\t': 797 case ' ': 798 goto done_str; 799 break; 800 default: 801 break; 802 }; 803 } 804 done_str: 805 return i; 806 } 807 808 static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) 809 { 810 unsigned n = 0; 811 char c; 812 ssize_t i = 0; 813 int len; 814 815 pkt_dev->nr_labels = 0; 816 do { 817 __u32 tmp; 818 len = hex32_arg(&buffer[i], &tmp); 819 if (len <= 0) 820 return len; 821 pkt_dev->labels[n] = htonl(tmp); 822 if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) 823 pkt_dev->flags |= F_MPLS_RND; 824 i += len; 825 if (get_user(c, &buffer[i])) 826 return -EFAULT; 827 i++; 828 n++; 829 if (n >= MAX_MPLS_LABELS) 830 return -E2BIG; 831 } while(c == ','); 832 833 pkt_dev->nr_labels = n; 834 return i; 835 } 836 837 static ssize_t pktgen_if_write(struct file *file, 838 const char __user * user_buffer, size_t count, 839 loff_t * offset) 840 { 841 struct seq_file *seq = (struct seq_file *)file->private_data; 842 struct pktgen_dev *pkt_dev = seq->private; 843 int i = 0, max, len; 844 char name[16], valstr[32]; 845 unsigned long value = 0; 846 char *pg_result = NULL; 847 int tmp = 0; 848 char buf[128]; 849 850 pg_result = &(pkt_dev->result[0]); 851 852 if (count < 1) { 853 printk("pktgen: wrong command format\n"); 854 return -EINVAL; 855 } 856 857 max = count - i; 858 tmp = count_trail_chars(&user_buffer[i], max); 859 if (tmp < 0) { 860 printk("pktgen: illegal format\n"); 861 return tmp; 862 } 863 i += tmp; 864 865 /* Read variable name */ 866 867 len = strn_len(&user_buffer[i], sizeof(name) - 1); 868 if (len < 0) { 869 return len; 870 } 871 memset(name, 0, sizeof(name)); 872 if (copy_from_user(name, &user_buffer[i], len)) 873 return -EFAULT; 874 i += len; 875 876 max = count - i; 877 len = count_trail_chars(&user_buffer[i], max); 878 if (len < 0) 879 return len; 880 881 i += len; 882 883 if (debug) { 884 char tb[count + 1]; 885 if (copy_from_user(tb, user_buffer, count)) 886 return -EFAULT; 887 tb[count] = 0; 888 printk("pktgen: %s,%lu buffer -:%s:-\n", name, 889 (unsigned long)count, tb); 890 } 891 892 if (!strcmp(name, "min_pkt_size")) { 893 len = num_arg(&user_buffer[i], 10, &value); 894 if (len < 0) { 895 return len; 896 } 897 i += len; 898 if (value < 14 + 20 + 8) 899 value = 14 + 20 + 8; 900 if (value != pkt_dev->min_pkt_size) { 901 pkt_dev->min_pkt_size = value; 902 pkt_dev->cur_pkt_size = value; 903 } 904 sprintf(pg_result, "OK: min_pkt_size=%u", 905 pkt_dev->min_pkt_size); 906 return count; 907 } 908 909 if (!strcmp(name, "max_pkt_size")) { 910 len = num_arg(&user_buffer[i], 10, &value); 911 if (len < 0) { 912 return len; 913 } 914 i += len; 915 if (value < 14 + 20 + 8) 916 value = 14 + 20 + 8; 917 if (value != pkt_dev->max_pkt_size) { 918 pkt_dev->max_pkt_size = value; 919 pkt_dev->cur_pkt_size = value; 920 } 921 sprintf(pg_result, "OK: max_pkt_size=%u", 922 pkt_dev->max_pkt_size); 923 return count; 924 } 925 926 /* Shortcut for min = max */ 927 928 if (!strcmp(name, 
"pkt_size")) { 929 len = num_arg(&user_buffer[i], 10, &value); 930 if (len < 0) { 931 return len; 932 } 933 i += len; 934 if (value < 14 + 20 + 8) 935 value = 14 + 20 + 8; 936 if (value != pkt_dev->min_pkt_size) { 937 pkt_dev->min_pkt_size = value; 938 pkt_dev->max_pkt_size = value; 939 pkt_dev->cur_pkt_size = value; 940 } 941 sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size); 942 return count; 943 } 944 945 if (!strcmp(name, "debug")) { 946 len = num_arg(&user_buffer[i], 10, &value); 947 if (len < 0) { 948 return len; 949 } 950 i += len; 951 debug = value; 952 sprintf(pg_result, "OK: debug=%u", debug); 953 return count; 954 } 955 956 if (!strcmp(name, "frags")) { 957 len = num_arg(&user_buffer[i], 10, &value); 958 if (len < 0) { 959 return len; 960 } 961 i += len; 962 pkt_dev->nfrags = value; 963 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); 964 return count; 965 } 966 if (!strcmp(name, "delay")) { 967 len = num_arg(&user_buffer[i], 10, &value); 968 if (len < 0) { 969 return len; 970 } 971 i += len; 972 if (value == 0x7FFFFFFF) { 973 pkt_dev->delay_us = 0x7FFFFFFF; 974 pkt_dev->delay_ns = 0; 975 } else { 976 pkt_dev->delay_us = value / 1000; 977 pkt_dev->delay_ns = value % 1000; 978 } 979 sprintf(pg_result, "OK: delay=%u", 980 1000 * pkt_dev->delay_us + pkt_dev->delay_ns); 981 return count; 982 } 983 if (!strcmp(name, "udp_src_min")) { 984 len = num_arg(&user_buffer[i], 10, &value); 985 if (len < 0) { 986 return len; 987 } 988 i += len; 989 if (value != pkt_dev->udp_src_min) { 990 pkt_dev->udp_src_min = value; 991 pkt_dev->cur_udp_src = value; 992 } 993 sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min); 994 return count; 995 } 996 if (!strcmp(name, "udp_dst_min")) { 997 len = num_arg(&user_buffer[i], 10, &value); 998 if (len < 0) { 999 return len; 1000 } 1001 i += len; 1002 if (value != pkt_dev->udp_dst_min) { 1003 pkt_dev->udp_dst_min = value; 1004 pkt_dev->cur_udp_dst = value; 1005 } 1006 sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min); 1007 return count; 1008 } 1009 if (!strcmp(name, "udp_src_max")) { 1010 len = num_arg(&user_buffer[i], 10, &value); 1011 if (len < 0) { 1012 return len; 1013 } 1014 i += len; 1015 if (value != pkt_dev->udp_src_max) { 1016 pkt_dev->udp_src_max = value; 1017 pkt_dev->cur_udp_src = value; 1018 } 1019 sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max); 1020 return count; 1021 } 1022 if (!strcmp(name, "udp_dst_max")) { 1023 len = num_arg(&user_buffer[i], 10, &value); 1024 if (len < 0) { 1025 return len; 1026 } 1027 i += len; 1028 if (value != pkt_dev->udp_dst_max) { 1029 pkt_dev->udp_dst_max = value; 1030 pkt_dev->cur_udp_dst = value; 1031 } 1032 sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max); 1033 return count; 1034 } 1035 if (!strcmp(name, "clone_skb")) { 1036 len = num_arg(&user_buffer[i], 10, &value); 1037 if (len < 0) { 1038 return len; 1039 } 1040 i += len; 1041 pkt_dev->clone_skb = value; 1042 1043 sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb); 1044 return count; 1045 } 1046 if (!strcmp(name, "count")) { 1047 len = num_arg(&user_buffer[i], 10, &value); 1048 if (len < 0) { 1049 return len; 1050 } 1051 i += len; 1052 pkt_dev->count = value; 1053 sprintf(pg_result, "OK: count=%llu", 1054 (unsigned long long)pkt_dev->count); 1055 return count; 1056 } 1057 if (!strcmp(name, "src_mac_count")) { 1058 len = num_arg(&user_buffer[i], 10, &value); 1059 if (len < 0) { 1060 return len; 1061 } 1062 i += len; 1063 if (pkt_dev->src_mac_count != value) { 1064 pkt_dev->src_mac_count = 
value; 1065 pkt_dev->cur_src_mac_offset = 0; 1066 } 1067 sprintf(pg_result, "OK: src_mac_count=%d", 1068 pkt_dev->src_mac_count); 1069 return count; 1070 } 1071 if (!strcmp(name, "dst_mac_count")) { 1072 len = num_arg(&user_buffer[i], 10, &value); 1073 if (len < 0) { 1074 return len; 1075 } 1076 i += len; 1077 if (pkt_dev->dst_mac_count != value) { 1078 pkt_dev->dst_mac_count = value; 1079 pkt_dev->cur_dst_mac_offset = 0; 1080 } 1081 sprintf(pg_result, "OK: dst_mac_count=%d", 1082 pkt_dev->dst_mac_count); 1083 return count; 1084 } 1085 if (!strcmp(name, "flag")) { 1086 char f[32]; 1087 memset(f, 0, 32); 1088 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1089 if (len < 0) { 1090 return len; 1091 } 1092 if (copy_from_user(f, &user_buffer[i], len)) 1093 return -EFAULT; 1094 i += len; 1095 if (strcmp(f, "IPSRC_RND") == 0) 1096 pkt_dev->flags |= F_IPSRC_RND; 1097 1098 else if (strcmp(f, "!IPSRC_RND") == 0) 1099 pkt_dev->flags &= ~F_IPSRC_RND; 1100 1101 else if (strcmp(f, "TXSIZE_RND") == 0) 1102 pkt_dev->flags |= F_TXSIZE_RND; 1103 1104 else if (strcmp(f, "!TXSIZE_RND") == 0) 1105 pkt_dev->flags &= ~F_TXSIZE_RND; 1106 1107 else if (strcmp(f, "IPDST_RND") == 0) 1108 pkt_dev->flags |= F_IPDST_RND; 1109 1110 else if (strcmp(f, "!IPDST_RND") == 0) 1111 pkt_dev->flags &= ~F_IPDST_RND; 1112 1113 else if (strcmp(f, "UDPSRC_RND") == 0) 1114 pkt_dev->flags |= F_UDPSRC_RND; 1115 1116 else if (strcmp(f, "!UDPSRC_RND") == 0) 1117 pkt_dev->flags &= ~F_UDPSRC_RND; 1118 1119 else if (strcmp(f, "UDPDST_RND") == 0) 1120 pkt_dev->flags |= F_UDPDST_RND; 1121 1122 else if (strcmp(f, "!UDPDST_RND") == 0) 1123 pkt_dev->flags &= ~F_UDPDST_RND; 1124 1125 else if (strcmp(f, "MACSRC_RND") == 0) 1126 pkt_dev->flags |= F_MACSRC_RND; 1127 1128 else if (strcmp(f, "!MACSRC_RND") == 0) 1129 pkt_dev->flags &= ~F_MACSRC_RND; 1130 1131 else if (strcmp(f, "MACDST_RND") == 0) 1132 pkt_dev->flags |= F_MACDST_RND; 1133 1134 else if (strcmp(f, "!MACDST_RND") == 0) 1135 pkt_dev->flags &= ~F_MACDST_RND; 1136 1137 else if (strcmp(f, "MPLS_RND") == 0) 1138 pkt_dev->flags |= F_MPLS_RND; 1139 1140 else if (strcmp(f, "!MPLS_RND") == 0) 1141 pkt_dev->flags &= ~F_MPLS_RND; 1142 1143 else { 1144 sprintf(pg_result, 1145 "Flag -:%s:- unknown\nAvailable flags, (prepend ! 
to un-set flag):\n%s", 1146 f, 1147 "IPSRC_RND, IPDST_RND, TXSIZE_RND, UDPSRC_RND, UDPDST_RND, MACSRC_RND, MACDST_RND\n"); 1148 return count; 1149 } 1150 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1151 return count; 1152 } 1153 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { 1154 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); 1155 if (len < 0) { 1156 return len; 1157 } 1158 1159 if (copy_from_user(buf, &user_buffer[i], len)) 1160 return -EFAULT; 1161 buf[len] = 0; 1162 if (strcmp(buf, pkt_dev->dst_min) != 0) { 1163 memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min)); 1164 strncpy(pkt_dev->dst_min, buf, len); 1165 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); 1166 pkt_dev->cur_daddr = pkt_dev->daddr_min; 1167 } 1168 if (debug) 1169 printk("pktgen: dst_min set to: %s\n", 1170 pkt_dev->dst_min); 1171 i += len; 1172 sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); 1173 return count; 1174 } 1175 if (!strcmp(name, "dst_max")) { 1176 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); 1177 if (len < 0) { 1178 return len; 1179 } 1180 1181 if (copy_from_user(buf, &user_buffer[i], len)) 1182 return -EFAULT; 1183 1184 buf[len] = 0; 1185 if (strcmp(buf, pkt_dev->dst_max) != 0) { 1186 memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max)); 1187 strncpy(pkt_dev->dst_max, buf, len); 1188 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); 1189 pkt_dev->cur_daddr = pkt_dev->daddr_max; 1190 } 1191 if (debug) 1192 printk("pktgen: dst_max set to: %s\n", 1193 pkt_dev->dst_max); 1194 i += len; 1195 sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); 1196 return count; 1197 } 1198 if (!strcmp(name, "dst6")) { 1199 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1200 if (len < 0) 1201 return len; 1202 1203 pkt_dev->flags |= F_IPV6; 1204 1205 if (copy_from_user(buf, &user_buffer[i], len)) 1206 return -EFAULT; 1207 buf[len] = 0; 1208 1209 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1210 fmt_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1211 1212 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); 1213 1214 if (debug) 1215 printk("pktgen: dst6 set to: %s\n", buf); 1216 1217 i += len; 1218 sprintf(pg_result, "OK: dst6=%s", buf); 1219 return count; 1220 } 1221 if (!strcmp(name, "dst6_min")) { 1222 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1223 if (len < 0) 1224 return len; 1225 1226 pkt_dev->flags |= F_IPV6; 1227 1228 if (copy_from_user(buf, &user_buffer[i], len)) 1229 return -EFAULT; 1230 buf[len] = 0; 1231 1232 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1233 fmt_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1234 1235 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, 1236 &pkt_dev->min_in6_daddr); 1237 if (debug) 1238 printk("pktgen: dst6_min set to: %s\n", buf); 1239 1240 i += len; 1241 sprintf(pg_result, "OK: dst6_min=%s", buf); 1242 return count; 1243 } 1244 if (!strcmp(name, "dst6_max")) { 1245 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1246 if (len < 0) 1247 return len; 1248 1249 pkt_dev->flags |= F_IPV6; 1250 1251 if (copy_from_user(buf, &user_buffer[i], len)) 1252 return -EFAULT; 1253 buf[len] = 0; 1254 1255 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1256 fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1257 1258 if (debug) 1259 printk("pktgen: dst6_max set to: %s\n", buf); 1260 1261 i += len; 1262 sprintf(pg_result, "OK: dst6_max=%s", buf); 1263 return count; 1264 } 1265 if (!strcmp(name, "src6")) { 1266 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1267 if (len < 0) 1268 return len; 1269 1270 pkt_dev->flags |= F_IPV6; 1271 1272 if 
(copy_from_user(buf, &user_buffer[i], len)) 1273 return -EFAULT; 1274 buf[len] = 0; 1275 1276 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1277 fmt_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1278 1279 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); 1280 1281 if (debug) 1282 printk("pktgen: src6 set to: %s\n", buf); 1283 1284 i += len; 1285 sprintf(pg_result, "OK: src6=%s", buf); 1286 return count; 1287 } 1288 if (!strcmp(name, "src_min")) { 1289 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); 1290 if (len < 0) { 1291 return len; 1292 } 1293 if (copy_from_user(buf, &user_buffer[i], len)) 1294 return -EFAULT; 1295 buf[len] = 0; 1296 if (strcmp(buf, pkt_dev->src_min) != 0) { 1297 memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min)); 1298 strncpy(pkt_dev->src_min, buf, len); 1299 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 1300 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1301 } 1302 if (debug) 1303 printk("pktgen: src_min set to: %s\n", 1304 pkt_dev->src_min); 1305 i += len; 1306 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); 1307 return count; 1308 } 1309 if (!strcmp(name, "src_max")) { 1310 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); 1311 if (len < 0) { 1312 return len; 1313 } 1314 if (copy_from_user(buf, &user_buffer[i], len)) 1315 return -EFAULT; 1316 buf[len] = 0; 1317 if (strcmp(buf, pkt_dev->src_max) != 0) { 1318 memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max)); 1319 strncpy(pkt_dev->src_max, buf, len); 1320 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 1321 pkt_dev->cur_saddr = pkt_dev->saddr_max; 1322 } 1323 if (debug) 1324 printk("pktgen: src_max set to: %s\n", 1325 pkt_dev->src_max); 1326 i += len; 1327 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); 1328 return count; 1329 } 1330 if (!strcmp(name, "dst_mac")) { 1331 char *v = valstr; 1332 unsigned char old_dmac[ETH_ALEN]; 1333 unsigned char *m = pkt_dev->dst_mac; 1334 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); 1335 1336 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1337 if (len < 0) { 1338 return len; 1339 } 1340 memset(valstr, 0, sizeof(valstr)); 1341 if (copy_from_user(valstr, &user_buffer[i], len)) 1342 return -EFAULT; 1343 i += len; 1344 1345 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) { 1346 if (*v >= '0' && *v <= '9') { 1347 *m *= 16; 1348 *m += *v - '0'; 1349 } 1350 if (*v >= 'A' && *v <= 'F') { 1351 *m *= 16; 1352 *m += *v - 'A' + 10; 1353 } 1354 if (*v >= 'a' && *v <= 'f') { 1355 *m *= 16; 1356 *m += *v - 'a' + 10; 1357 } 1358 if (*v == ':') { 1359 m++; 1360 *m = 0; 1361 } 1362 } 1363 1364 /* Set up Dest MAC */ 1365 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) 1366 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); 1367 1368 sprintf(pg_result, "OK: dstmac"); 1369 return count; 1370 } 1371 if (!strcmp(name, "src_mac")) { 1372 char *v = valstr; 1373 unsigned char *m = pkt_dev->src_mac; 1374 1375 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1376 if (len < 0) { 1377 return len; 1378 } 1379 memset(valstr, 0, sizeof(valstr)); 1380 if (copy_from_user(valstr, &user_buffer[i], len)) 1381 return -EFAULT; 1382 i += len; 1383 1384 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) { 1385 if (*v >= '0' && *v <= '9') { 1386 *m *= 16; 1387 *m += *v - '0'; 1388 } 1389 if (*v >= 'A' && *v <= 'F') { 1390 *m *= 16; 1391 *m += *v - 'A' + 10; 1392 } 1393 if (*v >= 'a' && *v <= 'f') { 1394 *m *= 16; 1395 *m += *v - 'a' + 10; 1396 } 1397 if (*v == ':') { 1398 m++; 1399 *m = 0; 1400 } 1401 } 1402 1403 sprintf(pg_result, "OK: srcmac"); 1404 return 
count; 1405 } 1406 1407 if (!strcmp(name, "clear_counters")) { 1408 pktgen_clear_counters(pkt_dev); 1409 sprintf(pg_result, "OK: Clearing counters.\n"); 1410 return count; 1411 } 1412 1413 if (!strcmp(name, "flows")) { 1414 len = num_arg(&user_buffer[i], 10, &value); 1415 if (len < 0) { 1416 return len; 1417 } 1418 i += len; 1419 if (value > MAX_CFLOWS) 1420 value = MAX_CFLOWS; 1421 1422 pkt_dev->cflows = value; 1423 sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows); 1424 return count; 1425 } 1426 1427 if (!strcmp(name, "flowlen")) { 1428 len = num_arg(&user_buffer[i], 10, &value); 1429 if (len < 0) { 1430 return len; 1431 } 1432 i += len; 1433 pkt_dev->lflow = value; 1434 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); 1435 return count; 1436 } 1437 1438 if (!strcmp(name, "mpls")) { 1439 unsigned n, offset; 1440 len = get_labels(&user_buffer[i], pkt_dev); 1441 if (len < 0) { return len; } 1442 i += len; 1443 offset = sprintf(pg_result, "OK: mpls="); 1444 for(n = 0; n < pkt_dev->nr_labels; n++) 1445 offset += sprintf(pg_result + offset, 1446 "%08x%s", ntohl(pkt_dev->labels[n]), 1447 n == pkt_dev->nr_labels-1 ? "" : ","); 1448 return count; 1449 } 1450 1451 sprintf(pkt_dev->result, "No such parameter \"%s\"", name); 1452 return -EINVAL; 1453 } 1454 1455 static int pktgen_if_open(struct inode *inode, struct file *file) 1456 { 1457 return single_open(file, pktgen_if_show, PDE(inode)->data); 1458 } 1459 1460 static struct file_operations pktgen_if_fops = { 1461 .owner = THIS_MODULE, 1462 .open = pktgen_if_open, 1463 .read = seq_read, 1464 .llseek = seq_lseek, 1465 .write = pktgen_if_write, 1466 .release = single_release, 1467 }; 1468 1469 static int pktgen_thread_show(struct seq_file *seq, void *v) 1470 { 1471 struct pktgen_thread *t = seq->private; 1472 struct pktgen_dev *pkt_dev; 1473 1474 BUG_ON(!t); 1475 1476 seq_printf(seq, "Name: %s max_before_softirq: %d\n", 1477 t->name, t->max_before_softirq); 1478 1479 seq_printf(seq, "Running: "); 1480 1481 if_lock(t); 1482 list_for_each_entry(pkt_dev, &t->if_list, list) 1483 if (pkt_dev->running) 1484 seq_printf(seq, "%s ", pkt_dev->ifname); 1485 1486 seq_printf(seq, "\nStopped: "); 1487 1488 list_for_each_entry(pkt_dev, &t->if_list, list) 1489 if (!pkt_dev->running) 1490 seq_printf(seq, "%s ", pkt_dev->ifname); 1491 1492 if (t->result[0]) 1493 seq_printf(seq, "\nResult: %s\n", t->result); 1494 else 1495 seq_printf(seq, "\nResult: NA\n"); 1496 1497 if_unlock(t); 1498 1499 return 0; 1500 } 1501 1502 static ssize_t pktgen_thread_write(struct file *file, 1503 const char __user * user_buffer, 1504 size_t count, loff_t * offset) 1505 { 1506 struct seq_file *seq = (struct seq_file *)file->private_data; 1507 struct pktgen_thread *t = seq->private; 1508 int i = 0, max, len, ret; 1509 char name[40]; 1510 char *pg_result; 1511 unsigned long value = 0; 1512 1513 if (count < 1) { 1514 // sprintf(pg_result, "Wrong command format"); 1515 return -EINVAL; 1516 } 1517 1518 max = count - i; 1519 len = count_trail_chars(&user_buffer[i], max); 1520 if (len < 0) 1521 return len; 1522 1523 i += len; 1524 1525 /* Read variable name */ 1526 1527 len = strn_len(&user_buffer[i], sizeof(name) - 1); 1528 if (len < 0) 1529 return len; 1530 1531 memset(name, 0, sizeof(name)); 1532 if (copy_from_user(name, &user_buffer[i], len)) 1533 return -EFAULT; 1534 i += len; 1535 1536 max = count - i; 1537 len = count_trail_chars(&user_buffer[i], max); 1538 if (len < 0) 1539 return len; 1540 1541 i += len; 1542 1543 if (debug) 1544 printk("pktgen: t=%s, count=%lu\n", name, 
(unsigned long)count); 1545 1546 if (!t) { 1547 printk("pktgen: ERROR: No thread\n"); 1548 ret = -EINVAL; 1549 goto out; 1550 } 1551 1552 pg_result = &(t->result[0]); 1553 1554 if (!strcmp(name, "add_device")) { 1555 char f[32]; 1556 memset(f, 0, 32); 1557 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1558 if (len < 0) { 1559 ret = len; 1560 goto out; 1561 } 1562 if (copy_from_user(f, &user_buffer[i], len)) 1563 return -EFAULT; 1564 i += len; 1565 mutex_lock(&pktgen_thread_lock); 1566 pktgen_add_device(t, f); 1567 mutex_unlock(&pktgen_thread_lock); 1568 ret = count; 1569 sprintf(pg_result, "OK: add_device=%s", f); 1570 goto out; 1571 } 1572 1573 if (!strcmp(name, "rem_device_all")) { 1574 mutex_lock(&pktgen_thread_lock); 1575 t->control |= T_REMDEVALL; 1576 mutex_unlock(&pktgen_thread_lock); 1577 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 1578 ret = count; 1579 sprintf(pg_result, "OK: rem_device_all"); 1580 goto out; 1581 } 1582 1583 if (!strcmp(name, "max_before_softirq")) { 1584 len = num_arg(&user_buffer[i], 10, &value); 1585 mutex_lock(&pktgen_thread_lock); 1586 t->max_before_softirq = value; 1587 mutex_unlock(&pktgen_thread_lock); 1588 ret = count; 1589 sprintf(pg_result, "OK: max_before_softirq=%lu", value); 1590 goto out; 1591 } 1592 1593 ret = -EINVAL; 1594 out: 1595 return ret; 1596 } 1597 1598 static int pktgen_thread_open(struct inode *inode, struct file *file) 1599 { 1600 return single_open(file, pktgen_thread_show, PDE(inode)->data); 1601 } 1602 1603 static struct file_operations pktgen_thread_fops = { 1604 .owner = THIS_MODULE, 1605 .open = pktgen_thread_open, 1606 .read = seq_read, 1607 .llseek = seq_lseek, 1608 .write = pktgen_thread_write, 1609 .release = single_release, 1610 }; 1611 1612 /* Think find or remove for NN */ 1613 static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove) 1614 { 1615 struct pktgen_thread *t; 1616 struct pktgen_dev *pkt_dev = NULL; 1617 1618 list_for_each_entry(t, &pktgen_threads, th_list) { 1619 pkt_dev = pktgen_find_dev(t, ifname); 1620 if (pkt_dev) { 1621 if (remove) { 1622 if_lock(t); 1623 pkt_dev->removal_mark = 1; 1624 t->control |= T_REMDEV; 1625 if_unlock(t); 1626 } 1627 break; 1628 } 1629 } 1630 return pkt_dev; 1631 } 1632 1633 /* 1634 * mark a device for removal 1635 */ 1636 static int pktgen_mark_device(const char *ifname) 1637 { 1638 struct pktgen_dev *pkt_dev = NULL; 1639 const int max_tries = 10, msec_per_try = 125; 1640 int i = 0; 1641 int ret = 0; 1642 1643 mutex_lock(&pktgen_thread_lock); 1644 PG_DEBUG(printk("pktgen: pktgen_mark_device marking %s for removal\n", 1645 ifname)); 1646 1647 while (1) { 1648 1649 pkt_dev = __pktgen_NN_threads(ifname, REMOVE); 1650 if (pkt_dev == NULL) 1651 break; /* success */ 1652 1653 mutex_unlock(&pktgen_thread_lock); 1654 PG_DEBUG(printk("pktgen: pktgen_mark_device waiting for %s " 1655 "to disappear....\n", ifname)); 1656 schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); 1657 mutex_lock(&pktgen_thread_lock); 1658 1659 if (++i >= max_tries) { 1660 printk("pktgen_mark_device: timed out after waiting " 1661 "%d msec for device %s to be removed\n", 1662 msec_per_try * i, ifname); 1663 ret = 1; 1664 break; 1665 } 1666 1667 } 1668 1669 mutex_unlock(&pktgen_thread_lock); 1670 1671 return ret; 1672 } 1673 1674 static int pktgen_device_event(struct notifier_block *unused, 1675 unsigned long event, void *ptr) 1676 { 1677 struct net_device *dev = (struct net_device *)(ptr); 1678 1679 /* It is OK that we do not hold the group 
lock right now, 1680 * as we run under the RTNL lock. 1681 */ 1682 1683 switch (event) { 1684 case NETDEV_CHANGEADDR: 1685 case NETDEV_GOING_DOWN: 1686 case NETDEV_DOWN: 1687 case NETDEV_UP: 1688 /* Ignore for now */ 1689 break; 1690 1691 case NETDEV_UNREGISTER: 1692 pktgen_mark_device(dev->name); 1693 break; 1694 }; 1695 1696 return NOTIFY_DONE; 1697 } 1698 1699 /* Associate pktgen_dev with a device. */ 1700 1701 static struct net_device *pktgen_setup_dev(struct pktgen_dev *pkt_dev) 1702 { 1703 struct net_device *odev; 1704 1705 /* Clean old setups */ 1706 1707 if (pkt_dev->odev) { 1708 dev_put(pkt_dev->odev); 1709 pkt_dev->odev = NULL; 1710 } 1711 1712 odev = dev_get_by_name(pkt_dev->ifname); 1713 1714 if (!odev) { 1715 printk("pktgen: no such netdevice: \"%s\"\n", pkt_dev->ifname); 1716 goto out; 1717 } 1718 if (odev->type != ARPHRD_ETHER) { 1719 printk("pktgen: not an ethernet device: \"%s\"\n", 1720 pkt_dev->ifname); 1721 goto out_put; 1722 } 1723 if (!netif_running(odev)) { 1724 printk("pktgen: device is down: \"%s\"\n", pkt_dev->ifname); 1725 goto out_put; 1726 } 1727 pkt_dev->odev = odev; 1728 1729 return pkt_dev->odev; 1730 1731 out_put: 1732 dev_put(odev); 1733 out: 1734 return NULL; 1735 1736 } 1737 1738 /* Read pkt_dev from the interface and set up internal pktgen_dev 1739 * structure to have the right information to create/send packets 1740 */ 1741 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) 1742 { 1743 /* Try once more, just in case it works now. */ 1744 if (!pkt_dev->odev) 1745 pktgen_setup_dev(pkt_dev); 1746 1747 if (!pkt_dev->odev) { 1748 printk("pktgen: ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 1749 sprintf(pkt_dev->result, 1750 "ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 1751 return; 1752 } 1753 1754 /* Default to the interface's mac if not explicitly set. */ 1755 1756 if (is_zero_ether_addr(pkt_dev->src_mac)) 1757 memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN); 1758 1759 /* Set up Dest MAC */ 1760 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); 1761 1762 /* Set up pkt size */ 1763 pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size; 1764 1765 if (pkt_dev->flags & F_IPV6) { 1766 /* 1767 * Skip this automatic address setting until locks or functions 1768 * gets exported 1769 */ 1770 1771 #ifdef NOTNOW 1772 int i, set = 0, err = 1; 1773 struct inet6_dev *idev; 1774 1775 for (i = 0; i < IN6_ADDR_HSIZE; i++) 1776 if (pkt_dev->cur_in6_saddr.s6_addr[i]) { 1777 set = 1; 1778 break; 1779 } 1780 1781 if (!set) { 1782 1783 /* 1784 * Use linklevel address if unconfigured. 
1785 * 1786 * use ipv6_get_lladdr if/when it's get exported 1787 */ 1788 1789 read_lock(&addrconf_lock); 1790 if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) { 1791 struct inet6_ifaddr *ifp; 1792 1793 read_lock_bh(&idev->lock); 1794 for (ifp = idev->addr_list; ifp; 1795 ifp = ifp->if_next) { 1796 if (ifp->scope == IFA_LINK 1797 && !(ifp-> 1798 flags & IFA_F_TENTATIVE)) { 1799 ipv6_addr_copy(&pkt_dev-> 1800 cur_in6_saddr, 1801 &ifp->addr); 1802 err = 0; 1803 break; 1804 } 1805 } 1806 read_unlock_bh(&idev->lock); 1807 } 1808 read_unlock(&addrconf_lock); 1809 if (err) 1810 printk("pktgen: ERROR: IPv6 link address not availble.\n"); 1811 } 1812 #endif 1813 } else { 1814 pkt_dev->saddr_min = 0; 1815 pkt_dev->saddr_max = 0; 1816 if (strlen(pkt_dev->src_min) == 0) { 1817 1818 struct in_device *in_dev; 1819 1820 rcu_read_lock(); 1821 in_dev = __in_dev_get_rcu(pkt_dev->odev); 1822 if (in_dev) { 1823 if (in_dev->ifa_list) { 1824 pkt_dev->saddr_min = 1825 in_dev->ifa_list->ifa_address; 1826 pkt_dev->saddr_max = pkt_dev->saddr_min; 1827 } 1828 } 1829 rcu_read_unlock(); 1830 } else { 1831 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 1832 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 1833 } 1834 1835 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); 1836 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); 1837 } 1838 /* Initialize current values. */ 1839 pkt_dev->cur_dst_mac_offset = 0; 1840 pkt_dev->cur_src_mac_offset = 0; 1841 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1842 pkt_dev->cur_daddr = pkt_dev->daddr_min; 1843 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min; 1844 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 1845 pkt_dev->nflows = 0; 1846 } 1847 1848 static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) 1849 { 1850 __u64 start; 1851 __u64 now; 1852 1853 start = now = getCurUs(); 1854 printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now)); 1855 while (now < spin_until_us) { 1856 /* TODO: optimize sleeping behavior */ 1857 if (spin_until_us - now > jiffies_to_usecs(1) + 1) 1858 schedule_timeout_interruptible(1); 1859 else if (spin_until_us - now > 100) { 1860 do_softirq(); 1861 if (!pkt_dev->running) 1862 return; 1863 if (need_resched()) 1864 schedule(); 1865 } 1866 1867 now = getCurUs(); 1868 } 1869 1870 pkt_dev->idle_acc += now - start; 1871 } 1872 1873 /* Increment/randomize headers according to flags and current values 1874 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 1875 */ 1876 static void mod_cur_headers(struct pktgen_dev *pkt_dev) 1877 { 1878 __u32 imn; 1879 __u32 imx; 1880 int flow = 0; 1881 1882 if (pkt_dev->cflows) { 1883 flow = pktgen_random() % pkt_dev->cflows; 1884 1885 if (pkt_dev->flows[flow].count > pkt_dev->lflow) 1886 pkt_dev->flows[flow].count = 0; 1887 } 1888 1889 /* Deal with source MAC */ 1890 if (pkt_dev->src_mac_count > 1) { 1891 __u32 mc; 1892 __u32 tmp; 1893 1894 if (pkt_dev->flags & F_MACSRC_RND) 1895 mc = pktgen_random() % (pkt_dev->src_mac_count); 1896 else { 1897 mc = pkt_dev->cur_src_mac_offset++; 1898 if (pkt_dev->cur_src_mac_offset > 1899 pkt_dev->src_mac_count) 1900 pkt_dev->cur_src_mac_offset = 0; 1901 } 1902 1903 tmp = pkt_dev->src_mac[5] + (mc & 0xFF); 1904 pkt_dev->hh[11] = tmp; 1905 tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 1906 pkt_dev->hh[10] = tmp; 1907 tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 1908 pkt_dev->hh[9] = tmp; 1909 tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 1910 pkt_dev->hh[8] = tmp; 1911 tmp = (pkt_dev->src_mac[1] + (tmp >> 8)); 1912 pkt_dev->hh[7] = 
tmp; 1913 } 1914 1915 /* Deal with Destination MAC */ 1916 if (pkt_dev->dst_mac_count > 1) { 1917 __u32 mc; 1918 __u32 tmp; 1919 1920 if (pkt_dev->flags & F_MACDST_RND) 1921 mc = pktgen_random() % (pkt_dev->dst_mac_count); 1922 1923 else { 1924 mc = pkt_dev->cur_dst_mac_offset++; 1925 if (pkt_dev->cur_dst_mac_offset > 1926 pkt_dev->dst_mac_count) { 1927 pkt_dev->cur_dst_mac_offset = 0; 1928 } 1929 } 1930 1931 tmp = pkt_dev->dst_mac[5] + (mc & 0xFF); 1932 pkt_dev->hh[5] = tmp; 1933 tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 1934 pkt_dev->hh[4] = tmp; 1935 tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 1936 pkt_dev->hh[3] = tmp; 1937 tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 1938 pkt_dev->hh[2] = tmp; 1939 tmp = (pkt_dev->dst_mac[1] + (tmp >> 8)); 1940 pkt_dev->hh[1] = tmp; 1941 } 1942 1943 if (pkt_dev->flags & F_MPLS_RND) { 1944 unsigned i; 1945 for(i = 0; i < pkt_dev->nr_labels; i++) 1946 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 1947 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 1948 (pktgen_random() & 1949 htonl(0x000fffff)); 1950 } 1951 1952 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 1953 if (pkt_dev->flags & F_UDPSRC_RND) 1954 pkt_dev->cur_udp_src = 1955 ((pktgen_random() % 1956 (pkt_dev->udp_src_max - pkt_dev->udp_src_min)) + 1957 pkt_dev->udp_src_min); 1958 1959 else { 1960 pkt_dev->cur_udp_src++; 1961 if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max) 1962 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 1963 } 1964 } 1965 1966 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) { 1967 if (pkt_dev->flags & F_UDPDST_RND) { 1968 pkt_dev->cur_udp_dst = 1969 ((pktgen_random() % 1970 (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)) + 1971 pkt_dev->udp_dst_min); 1972 } else { 1973 pkt_dev->cur_udp_dst++; 1974 if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max) 1975 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min; 1976 } 1977 } 1978 1979 if (!(pkt_dev->flags & F_IPV6)) { 1980 1981 if ((imn = ntohl(pkt_dev->saddr_min)) < (imx = 1982 ntohl(pkt_dev-> 1983 saddr_max))) { 1984 __u32 t; 1985 if (pkt_dev->flags & F_IPSRC_RND) 1986 t = ((pktgen_random() % (imx - imn)) + imn); 1987 else { 1988 t = ntohl(pkt_dev->cur_saddr); 1989 t++; 1990 if (t > imx) { 1991 t = imn; 1992 } 1993 } 1994 pkt_dev->cur_saddr = htonl(t); 1995 } 1996 1997 if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { 1998 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 1999 } else { 2000 2001 if ((imn = ntohl(pkt_dev->daddr_min)) < (imx = 2002 ntohl(pkt_dev-> 2003 daddr_max))) 2004 { 2005 __u32 t; 2006 if (pkt_dev->flags & F_IPDST_RND) { 2007 2008 t = ((pktgen_random() % (imx - imn)) + 2009 imn); 2010 t = htonl(t); 2011 2012 while (LOOPBACK(t) || MULTICAST(t) 2013 || BADCLASS(t) || ZERONET(t) 2014 || LOCAL_MCAST(t)) { 2015 t = ((pktgen_random() % 2016 (imx - imn)) + imn); 2017 t = htonl(t); 2018 } 2019 pkt_dev->cur_daddr = t; 2020 } 2021 2022 else { 2023 t = ntohl(pkt_dev->cur_daddr); 2024 t++; 2025 if (t > imx) { 2026 t = imn; 2027 } 2028 pkt_dev->cur_daddr = htonl(t); 2029 } 2030 } 2031 if (pkt_dev->cflows) { 2032 pkt_dev->flows[flow].cur_daddr = 2033 pkt_dev->cur_daddr; 2034 pkt_dev->nflows++; 2035 } 2036 } 2037 } else { /* IPV6 * */ 2038 2039 if (pkt_dev->min_in6_daddr.s6_addr32[0] == 0 && 2040 pkt_dev->min_in6_daddr.s6_addr32[1] == 0 && 2041 pkt_dev->min_in6_daddr.s6_addr32[2] == 0 && 2042 pkt_dev->min_in6_daddr.s6_addr32[3] == 0) ; 2043 else { 2044 int i; 2045 2046 /* Only random destinations yet */ 2047 2048 for (i = 0; i < 4; i++) { 2049 
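			/* Mask-based constraint rather than a numeric range:
			 * OR in the bits set in min_in6_daddr, then AND with
			 * max_in6_daddr, one 32-bit word at a time.
			 */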
pkt_dev->cur_in6_daddr.s6_addr32[i] = 2050 ((pktgen_random() | 2051 pkt_dev->min_in6_daddr.s6_addr32[i]) & 2052 pkt_dev->max_in6_daddr.s6_addr32[i]); 2053 } 2054 } 2055 } 2056 2057 if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) { 2058 __u32 t; 2059 if (pkt_dev->flags & F_TXSIZE_RND) { 2060 t = ((pktgen_random() % 2061 (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)) 2062 + pkt_dev->min_pkt_size); 2063 } else { 2064 t = pkt_dev->cur_pkt_size + 1; 2065 if (t > pkt_dev->max_pkt_size) 2066 t = pkt_dev->min_pkt_size; 2067 } 2068 pkt_dev->cur_pkt_size = t; 2069 } 2070 2071 pkt_dev->flows[flow].count++; 2072 } 2073 2074 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2075 { 2076 unsigned i; 2077 for(i = 0; i < pkt_dev->nr_labels; i++) { 2078 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2079 } 2080 mpls--; 2081 *mpls |= MPLS_STACK_BOTTOM; 2082 } 2083 2084 static struct sk_buff *fill_packet_ipv4(struct net_device *odev, 2085 struct pktgen_dev *pkt_dev) 2086 { 2087 struct sk_buff *skb = NULL; 2088 __u8 *eth; 2089 struct udphdr *udph; 2090 int datalen, iplen; 2091 struct iphdr *iph; 2092 struct pktgen_hdr *pgh = NULL; 2093 __be16 protocol = __constant_htons(ETH_P_IP); 2094 __be32 *mpls; 2095 2096 if (pkt_dev->nr_labels) 2097 protocol = __constant_htons(ETH_P_MPLS_UC); 2098 2099 /* Update any of the values, used when we're incrementing various 2100 * fields. 2101 */ 2102 mod_cur_headers(pkt_dev); 2103 2104 datalen = (odev->hard_header_len + 16) & ~0xf; 2105 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2106 pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); 2107 if (!skb) { 2108 sprintf(pkt_dev->result, "No memory"); 2109 return NULL; 2110 } 2111 2112 skb_reserve(skb, datalen); 2113 2114 /* Reserve for ethernet and IP header */ 2115 eth = (__u8 *) skb_push(skb, 14); 2116 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2117 if (pkt_dev->nr_labels) 2118 mpls_push(mpls, pkt_dev); 2119 iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)); 2120 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2121 2122 memcpy(eth, pkt_dev->hh, 12); 2123 *(u16 *) & eth[12] = protocol; 2124 2125 /* Eth + IPh + UDPh + mpls */ 2126 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2127 pkt_dev->nr_labels*sizeof(u32); 2128 if (datalen < sizeof(struct pktgen_hdr)) 2129 datalen = sizeof(struct pktgen_hdr); 2130 2131 udph->source = htons(pkt_dev->cur_udp_src); 2132 udph->dest = htons(pkt_dev->cur_udp_dst); 2133 udph->len = htons(datalen + 8); /* DATA + udphdr */ 2134 udph->check = 0; /* No checksum */ 2135 2136 iph->ihl = 5; 2137 iph->version = 4; 2138 iph->ttl = 32; 2139 iph->tos = 0; 2140 iph->protocol = IPPROTO_UDP; /* UDP */ 2141 iph->saddr = pkt_dev->cur_saddr; 2142 iph->daddr = pkt_dev->cur_daddr; 2143 iph->frag_off = 0; 2144 iplen = 20 + 8 + datalen; 2145 iph->tot_len = htons(iplen); 2146 iph->check = 0; 2147 iph->check = ip_fast_csum((void *)iph, iph->ihl); 2148 skb->protocol = protocol; 2149 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); 2150 skb->dev = odev; 2151 skb->pkt_type = PACKET_HOST; 2152 skb->nh.iph = iph; 2153 skb->h.uh = udph; 2154 2155 if (pkt_dev->nfrags <= 0) 2156 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2157 else { 2158 int frags = pkt_dev->nfrags; 2159 int i; 2160 2161 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); 2162 2163 if (frags > MAX_SKB_FRAGS) 2164 frags = MAX_SKB_FRAGS; 2165 if (datalen > frags * PAGE_SIZE) { 2166 skb_put(skb, datalen - frags * PAGE_SIZE); 2167 datalen = frags * PAGE_SIZE; 2168 } 2169 
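		/* Spread the remaining payload across page fragments, one page
		 * at a time; the loop that follows then splits the last
		 * fragment so that up to 'frags' entries end up populated.
		 */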
2170 i = 0; 2171 while (datalen > 0) { 2172 struct page *page = alloc_pages(GFP_KERNEL, 0); 2173 skb_shinfo(skb)->frags[i].page = page; 2174 skb_shinfo(skb)->frags[i].page_offset = 0; 2175 skb_shinfo(skb)->frags[i].size = 2176 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2177 datalen -= skb_shinfo(skb)->frags[i].size; 2178 skb->len += skb_shinfo(skb)->frags[i].size; 2179 skb->data_len += skb_shinfo(skb)->frags[i].size; 2180 i++; 2181 skb_shinfo(skb)->nr_frags = i; 2182 } 2183 2184 while (i < frags) { 2185 int rem; 2186 2187 if (i == 0) 2188 break; 2189 2190 rem = skb_shinfo(skb)->frags[i - 1].size / 2; 2191 if (rem == 0) 2192 break; 2193 2194 skb_shinfo(skb)->frags[i - 1].size -= rem; 2195 2196 skb_shinfo(skb)->frags[i] = 2197 skb_shinfo(skb)->frags[i - 1]; 2198 get_page(skb_shinfo(skb)->frags[i].page); 2199 skb_shinfo(skb)->frags[i].page = 2200 skb_shinfo(skb)->frags[i - 1].page; 2201 skb_shinfo(skb)->frags[i].page_offset += 2202 skb_shinfo(skb)->frags[i - 1].size; 2203 skb_shinfo(skb)->frags[i].size = rem; 2204 i++; 2205 skb_shinfo(skb)->nr_frags = i; 2206 } 2207 } 2208 2209 /* Stamp the time, and sequence number, convert them to network byte order */ 2210 2211 if (pgh) { 2212 struct timeval timestamp; 2213 2214 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2215 pgh->seq_num = htonl(pkt_dev->seq_num); 2216 2217 do_gettimeofday(×tamp); 2218 pgh->tv_sec = htonl(timestamp.tv_sec); 2219 pgh->tv_usec = htonl(timestamp.tv_usec); 2220 } 2221 pkt_dev->seq_num++; 2222 2223 return skb; 2224 } 2225 2226 /* 2227 * scan_ip6, fmt_ip taken from dietlibc-0.21 2228 * Author Felix von Leitner <felix-dietlibc@fefe.de> 2229 * 2230 * Slightly modified for kernel. 2231 * Should be candidate for net/ipv4/utils.c 2232 * --ro 2233 */ 2234 2235 static unsigned int scan_ip6(const char *s, char ip[16]) 2236 { 2237 unsigned int i; 2238 unsigned int len = 0; 2239 unsigned long u; 2240 char suffix[16]; 2241 unsigned int prefixlen = 0; 2242 unsigned int suffixlen = 0; 2243 __u32 tmp; 2244 2245 for (i = 0; i < 16; i++) 2246 ip[i] = 0; 2247 2248 for (;;) { 2249 if (*s == ':') { 2250 len++; 2251 if (s[1] == ':') { /* Found "::", skip to part 2 */ 2252 s += 2; 2253 len++; 2254 break; 2255 } 2256 s++; 2257 } 2258 { 2259 char *tmp; 2260 u = simple_strtoul(s, &tmp, 16); 2261 i = tmp - s; 2262 } 2263 2264 if (!i) 2265 return 0; 2266 if (prefixlen == 12 && s[i] == '.') { 2267 2268 /* the last 4 bytes may be written as IPv4 address */ 2269 2270 tmp = in_aton(s); 2271 memcpy((struct in_addr *)(ip + 12), &tmp, sizeof(tmp)); 2272 return i + len; 2273 } 2274 ip[prefixlen++] = (u >> 8); 2275 ip[prefixlen++] = (u & 255); 2276 s += i; 2277 len += i; 2278 if (prefixlen == 16) 2279 return len; 2280 } 2281 2282 /* part 2, after "::" */ 2283 for (;;) { 2284 if (*s == ':') { 2285 if (suffixlen == 0) 2286 break; 2287 s++; 2288 len++; 2289 } else if (suffixlen != 0) 2290 break; 2291 { 2292 char *tmp; 2293 u = simple_strtol(s, &tmp, 16); 2294 i = tmp - s; 2295 } 2296 if (!i) { 2297 if (*s) 2298 len--; 2299 break; 2300 } 2301 if (suffixlen + prefixlen <= 12 && s[i] == '.') { 2302 tmp = in_aton(s); 2303 memcpy((struct in_addr *)(suffix + suffixlen), &tmp, 2304 sizeof(tmp)); 2305 suffixlen += 4; 2306 len += strlen(s); 2307 break; 2308 } 2309 suffix[suffixlen++] = (u >> 8); 2310 suffix[suffixlen++] = (u & 255); 2311 s += i; 2312 len += i; 2313 if (prefixlen + suffixlen == 16) 2314 break; 2315 } 2316 for (i = 0; i < suffixlen; i++) 2317 ip[16 - suffixlen + i] = suffix[i]; 2318 return len; 2319 } 2320 2321 static char tohex(char hexdigit) 2322 { 2323 return 
hexdigit > 9 ? hexdigit + 'a' - 10 : hexdigit + '0'; 2324 } 2325 2326 static int fmt_xlong(char *s, unsigned int i) 2327 { 2328 char *bak = s; 2329 *s = tohex((i >> 12) & 0xf); 2330 if (s != bak || *s != '0') 2331 ++s; 2332 *s = tohex((i >> 8) & 0xf); 2333 if (s != bak || *s != '0') 2334 ++s; 2335 *s = tohex((i >> 4) & 0xf); 2336 if (s != bak || *s != '0') 2337 ++s; 2338 *s = tohex(i & 0xf); 2339 return s - bak + 1; 2340 } 2341 2342 static unsigned int fmt_ip6(char *s, const char ip[16]) 2343 { 2344 unsigned int len; 2345 unsigned int i; 2346 unsigned int temp; 2347 unsigned int compressing; 2348 int j; 2349 2350 len = 0; 2351 compressing = 0; 2352 for (j = 0; j < 16; j += 2) { 2353 2354 #ifdef V4MAPPEDPREFIX 2355 if (j == 12 && !memcmp(ip, V4mappedprefix, 12)) { 2356 inet_ntoa_r(*(struct in_addr *)(ip + 12), s); 2357 temp = strlen(s); 2358 return len + temp; 2359 } 2360 #endif 2361 temp = ((unsigned long)(unsigned char)ip[j] << 8) + 2362 (unsigned long)(unsigned char)ip[j + 1]; 2363 if (temp == 0) { 2364 if (!compressing) { 2365 compressing = 1; 2366 if (j == 0) { 2367 *s++ = ':'; 2368 ++len; 2369 } 2370 } 2371 } else { 2372 if (compressing) { 2373 compressing = 0; 2374 *s++ = ':'; 2375 ++len; 2376 } 2377 i = fmt_xlong(s, temp); 2378 len += i; 2379 s += i; 2380 if (j < 14) { 2381 *s++ = ':'; 2382 ++len; 2383 } 2384 } 2385 } 2386 if (compressing) { 2387 *s++ = ':'; 2388 ++len; 2389 } 2390 *s = 0; 2391 return len; 2392 } 2393 2394 static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2395 struct pktgen_dev *pkt_dev) 2396 { 2397 struct sk_buff *skb = NULL; 2398 __u8 *eth; 2399 struct udphdr *udph; 2400 int datalen; 2401 struct ipv6hdr *iph; 2402 struct pktgen_hdr *pgh = NULL; 2403 __be16 protocol = __constant_htons(ETH_P_IPV6); 2404 __be32 *mpls; 2405 2406 if (pkt_dev->nr_labels) 2407 protocol = __constant_htons(ETH_P_MPLS_UC); 2408 2409 /* Update any of the values, used when we're incrementing various 2410 * fields. 
2411 */ 2412 mod_cur_headers(pkt_dev); 2413 2414 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2415 pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); 2416 if (!skb) { 2417 sprintf(pkt_dev->result, "No memory"); 2418 return NULL; 2419 } 2420 2421 skb_reserve(skb, 16); 2422 2423 /* Reserve for ethernet and IP header */ 2424 eth = (__u8 *) skb_push(skb, 14); 2425 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2426 if (pkt_dev->nr_labels) 2427 mpls_push(mpls, pkt_dev); 2428 iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr)); 2429 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2430 2431 memcpy(eth, pkt_dev->hh, 12); 2432 *(u16 *) & eth[12] = __constant_htons(ETH_P_IPV6); 2433 2434 /* Eth + IPh + UDPh + mpls */ 2435 datalen = pkt_dev->cur_pkt_size - 14 - 2436 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 2437 pkt_dev->nr_labels*sizeof(u32); 2438 2439 if (datalen < sizeof(struct pktgen_hdr)) { 2440 datalen = sizeof(struct pktgen_hdr); 2441 if (net_ratelimit()) 2442 printk(KERN_INFO "pktgen: increased datalen to %d\n", 2443 datalen); 2444 } 2445 2446 udph->source = htons(pkt_dev->cur_udp_src); 2447 udph->dest = htons(pkt_dev->cur_udp_dst); 2448 udph->len = htons(datalen + sizeof(struct udphdr)); 2449 udph->check = 0; /* No checksum */ 2450 2451 *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ 2452 2453 iph->hop_limit = 32; 2454 2455 iph->payload_len = htons(sizeof(struct udphdr) + datalen); 2456 iph->nexthdr = IPPROTO_UDP; 2457 2458 ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); 2459 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); 2460 2461 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); 2462 skb->protocol = protocol; 2463 skb->dev = odev; 2464 skb->pkt_type = PACKET_HOST; 2465 skb->nh.ipv6h = iph; 2466 skb->h.uh = udph; 2467 2468 if (pkt_dev->nfrags <= 0) 2469 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2470 else { 2471 int frags = pkt_dev->nfrags; 2472 int i; 2473 2474 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); 2475 2476 if (frags > MAX_SKB_FRAGS) 2477 frags = MAX_SKB_FRAGS; 2478 if (datalen > frags * PAGE_SIZE) { 2479 skb_put(skb, datalen - frags * PAGE_SIZE); 2480 datalen = frags * PAGE_SIZE; 2481 } 2482 2483 i = 0; 2484 while (datalen > 0) { 2485 struct page *page = alloc_pages(GFP_KERNEL, 0); 2486 skb_shinfo(skb)->frags[i].page = page; 2487 skb_shinfo(skb)->frags[i].page_offset = 0; 2488 skb_shinfo(skb)->frags[i].size = 2489 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2490 datalen -= skb_shinfo(skb)->frags[i].size; 2491 skb->len += skb_shinfo(skb)->frags[i].size; 2492 skb->data_len += skb_shinfo(skb)->frags[i].size; 2493 i++; 2494 skb_shinfo(skb)->nr_frags = i; 2495 } 2496 2497 while (i < frags) { 2498 int rem; 2499 2500 if (i == 0) 2501 break; 2502 2503 rem = skb_shinfo(skb)->frags[i - 1].size / 2; 2504 if (rem == 0) 2505 break; 2506 2507 skb_shinfo(skb)->frags[i - 1].size -= rem; 2508 2509 skb_shinfo(skb)->frags[i] = 2510 skb_shinfo(skb)->frags[i - 1]; 2511 get_page(skb_shinfo(skb)->frags[i].page); 2512 skb_shinfo(skb)->frags[i].page = 2513 skb_shinfo(skb)->frags[i - 1].page; 2514 skb_shinfo(skb)->frags[i].page_offset += 2515 skb_shinfo(skb)->frags[i - 1].size; 2516 skb_shinfo(skb)->frags[i].size = rem; 2517 i++; 2518 skb_shinfo(skb)->nr_frags = i; 2519 } 2520 } 2521 2522 /* Stamp the time, and sequence number, convert them to network byte order */ 2523 /* should we update cloned packets too ? 
*/ 2524 if (pgh) { 2525 struct timeval timestamp; 2526 2527 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2528 pgh->seq_num = htonl(pkt_dev->seq_num); 2529 2530 do_gettimeofday(×tamp); 2531 pgh->tv_sec = htonl(timestamp.tv_sec); 2532 pgh->tv_usec = htonl(timestamp.tv_usec); 2533 } 2534 pkt_dev->seq_num++; 2535 2536 return skb; 2537 } 2538 2539 static inline struct sk_buff *fill_packet(struct net_device *odev, 2540 struct pktgen_dev *pkt_dev) 2541 { 2542 if (pkt_dev->flags & F_IPV6) 2543 return fill_packet_ipv6(odev, pkt_dev); 2544 else 2545 return fill_packet_ipv4(odev, pkt_dev); 2546 } 2547 2548 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev) 2549 { 2550 pkt_dev->seq_num = 1; 2551 pkt_dev->idle_acc = 0; 2552 pkt_dev->sofar = 0; 2553 pkt_dev->tx_bytes = 0; 2554 pkt_dev->errors = 0; 2555 } 2556 2557 /* Set up structure for sending pkts, clear counters */ 2558 2559 static void pktgen_run(struct pktgen_thread *t) 2560 { 2561 struct pktgen_dev *pkt_dev; 2562 int started = 0; 2563 2564 PG_DEBUG(printk("pktgen: entering pktgen_run. %p\n", t)); 2565 2566 if_lock(t); 2567 list_for_each_entry(pkt_dev, &t->if_list, list) { 2568 2569 /* 2570 * setup odev and create initial packet. 2571 */ 2572 pktgen_setup_inject(pkt_dev); 2573 2574 if (pkt_dev->odev) { 2575 pktgen_clear_counters(pkt_dev); 2576 pkt_dev->running = 1; /* Cranke yeself! */ 2577 pkt_dev->skb = NULL; 2578 pkt_dev->started_at = getCurUs(); 2579 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ 2580 pkt_dev->next_tx_ns = 0; 2581 2582 strcpy(pkt_dev->result, "Starting"); 2583 started++; 2584 } else 2585 strcpy(pkt_dev->result, "Error starting"); 2586 } 2587 if_unlock(t); 2588 if (started) 2589 t->control &= ~(T_STOP); 2590 } 2591 2592 static void pktgen_stop_all_threads_ifs(void) 2593 { 2594 struct pktgen_thread *t; 2595 2596 PG_DEBUG(printk("pktgen: entering pktgen_stop_all_threads_ifs.\n")); 2597 2598 mutex_lock(&pktgen_thread_lock); 2599 2600 list_for_each_entry(t, &pktgen_threads, th_list) 2601 t->control |= T_STOP; 2602 2603 mutex_unlock(&pktgen_thread_lock); 2604 } 2605 2606 static int thread_is_running(struct pktgen_thread *t) 2607 { 2608 struct pktgen_dev *pkt_dev; 2609 int res = 0; 2610 2611 list_for_each_entry(pkt_dev, &t->if_list, list) 2612 if (pkt_dev->running) { 2613 res = 1; 2614 break; 2615 } 2616 return res; 2617 } 2618 2619 static int pktgen_wait_thread_run(struct pktgen_thread *t) 2620 { 2621 if_lock(t); 2622 2623 while (thread_is_running(t)) { 2624 2625 if_unlock(t); 2626 2627 msleep_interruptible(100); 2628 2629 if (signal_pending(current)) 2630 goto signal; 2631 if_lock(t); 2632 } 2633 if_unlock(t); 2634 return 1; 2635 signal: 2636 return 0; 2637 } 2638 2639 static int pktgen_wait_all_threads_run(void) 2640 { 2641 struct pktgen_thread *t; 2642 int sig = 1; 2643 2644 mutex_lock(&pktgen_thread_lock); 2645 2646 list_for_each_entry(t, &pktgen_threads, th_list) { 2647 sig = pktgen_wait_thread_run(t); 2648 if (sig == 0) 2649 break; 2650 } 2651 2652 if (sig == 0) 2653 list_for_each_entry(t, &pktgen_threads, th_list) 2654 t->control |= (T_STOP); 2655 2656 mutex_unlock(&pktgen_thread_lock); 2657 return sig; 2658 } 2659 2660 static void pktgen_run_all_threads(void) 2661 { 2662 struct pktgen_thread *t; 2663 2664 PG_DEBUG(printk("pktgen: entering pktgen_run_all_threads.\n")); 2665 2666 mutex_lock(&pktgen_thread_lock); 2667 2668 list_for_each_entry(t, &pktgen_threads, th_list) 2669 t->control |= (T_RUN); 2670 2671 mutex_unlock(&pktgen_thread_lock); 2672 2673 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* 
Propagate thread->control */ 2674 2675 pktgen_wait_all_threads_run(); 2676 } 2677 2678 static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 2679 { 2680 __u64 total_us, bps, mbps, pps, idle; 2681 char *p = pkt_dev->result; 2682 2683 total_us = pkt_dev->stopped_at - pkt_dev->started_at; 2684 2685 idle = pkt_dev->idle_acc; 2686 2687 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", 2688 (unsigned long long)total_us, 2689 (unsigned long long)(total_us - idle), 2690 (unsigned long long)idle, 2691 (unsigned long long)pkt_dev->sofar, 2692 pkt_dev->cur_pkt_size, nr_frags); 2693 2694 pps = pkt_dev->sofar * USEC_PER_SEC; 2695 2696 while ((total_us >> 32) != 0) { 2697 pps >>= 1; 2698 total_us >>= 1; 2699 } 2700 2701 do_div(pps, total_us); 2702 2703 bps = pps * 8 * pkt_dev->cur_pkt_size; 2704 2705 mbps = bps; 2706 do_div(mbps, 1000000); 2707 p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu", 2708 (unsigned long long)pps, 2709 (unsigned long long)mbps, 2710 (unsigned long long)bps, 2711 (unsigned long long)pkt_dev->errors); 2712 } 2713 2714 /* Set stopped-at timer, remove from running list, do counters & statistics */ 2715 2716 static int pktgen_stop_device(struct pktgen_dev *pkt_dev) 2717 { 2718 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; 2719 2720 if (!pkt_dev->running) { 2721 printk("pktgen: interface: %s is already stopped\n", 2722 pkt_dev->ifname); 2723 return -EINVAL; 2724 } 2725 2726 pkt_dev->stopped_at = getCurUs(); 2727 pkt_dev->running = 0; 2728 2729 show_results(pkt_dev, nr_frags); 2730 2731 return 0; 2732 } 2733 2734 static struct pktgen_dev *next_to_run(struct pktgen_thread *t) 2735 { 2736 struct pktgen_dev *pkt_dev, *best = NULL; 2737 2738 if_lock(t); 2739 2740 list_for_each_entry(pkt_dev, &t->if_list, list) { 2741 if (!pkt_dev->running) 2742 continue; 2743 if (best == NULL) 2744 best = pkt_dev; 2745 else if (pkt_dev->next_tx_us < best->next_tx_us) 2746 best = pkt_dev; 2747 } 2748 if_unlock(t); 2749 return best; 2750 } 2751 2752 static void pktgen_stop(struct pktgen_thread *t) 2753 { 2754 struct pktgen_dev *pkt_dev; 2755 2756 PG_DEBUG(printk("pktgen: entering pktgen_stop\n")); 2757 2758 if_lock(t); 2759 2760 list_for_each_entry(pkt_dev, &t->if_list, list) { 2761 pktgen_stop_device(pkt_dev); 2762 if (pkt_dev->skb) 2763 kfree_skb(pkt_dev->skb); 2764 2765 pkt_dev->skb = NULL; 2766 } 2767 2768 if_unlock(t); 2769 } 2770 2771 /* 2772 * one of our devices needs to be removed - find it 2773 * and remove it 2774 */ 2775 static void pktgen_rem_one_if(struct pktgen_thread *t) 2776 { 2777 struct list_head *q, *n; 2778 struct pktgen_dev *cur; 2779 2780 PG_DEBUG(printk("pktgen: entering pktgen_rem_one_if\n")); 2781 2782 if_lock(t); 2783 2784 list_for_each_safe(q, n, &t->if_list) { 2785 cur = list_entry(q, struct pktgen_dev, list); 2786 2787 if (!cur->removal_mark) 2788 continue; 2789 2790 if (cur->skb) 2791 kfree_skb(cur->skb); 2792 cur->skb = NULL; 2793 2794 pktgen_remove_device(t, cur); 2795 2796 break; 2797 } 2798 2799 if_unlock(t); 2800 } 2801 2802 static void pktgen_rem_all_ifs(struct pktgen_thread *t) 2803 { 2804 struct list_head *q, *n; 2805 struct pktgen_dev *cur; 2806 2807 /* Remove all devices, free mem */ 2808 2809 PG_DEBUG(printk("pktgen: entering pktgen_rem_all_ifs\n")); 2810 if_lock(t); 2811 2812 list_for_each_safe(q, n, &t->if_list) { 2813 cur = list_entry(q, struct pktgen_dev, list); 2814 2815 if (cur->skb) 2816 kfree_skb(cur->skb); 2817 cur->skb = NULL; 2818 2819 pktgen_remove_device(t, cur); 2820 } 2821 2822 
if_unlock(t); 2823 } 2824 2825 static void pktgen_rem_thread(struct pktgen_thread *t) 2826 { 2827 /* Remove from the thread list */ 2828 2829 remove_proc_entry(t->name, pg_proc_dir); 2830 2831 mutex_lock(&pktgen_thread_lock); 2832 2833 list_del(&t->th_list); 2834 2835 mutex_unlock(&pktgen_thread_lock); 2836 } 2837 2838 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) 2839 { 2840 struct net_device *odev = NULL; 2841 __u64 idle_start = 0; 2842 int ret; 2843 2844 odev = pkt_dev->odev; 2845 2846 if (pkt_dev->delay_us || pkt_dev->delay_ns) { 2847 u64 now; 2848 2849 now = getCurUs(); 2850 if (now < pkt_dev->next_tx_us) 2851 spin(pkt_dev, pkt_dev->next_tx_us); 2852 2853 /* This is max DELAY, this has special meaning of 2854 * "never transmit" 2855 */ 2856 if (pkt_dev->delay_us == 0x7FFFFFFF) { 2857 pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us; 2858 pkt_dev->next_tx_ns = pkt_dev->delay_ns; 2859 goto out; 2860 } 2861 } 2862 2863 if (netif_queue_stopped(odev) || need_resched()) { 2864 idle_start = getCurUs(); 2865 2866 if (!netif_running(odev)) { 2867 pktgen_stop_device(pkt_dev); 2868 if (pkt_dev->skb) 2869 kfree_skb(pkt_dev->skb); 2870 pkt_dev->skb = NULL; 2871 goto out; 2872 } 2873 if (need_resched()) 2874 schedule(); 2875 2876 pkt_dev->idle_acc += getCurUs() - idle_start; 2877 2878 if (netif_queue_stopped(odev)) { 2879 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 2880 pkt_dev->next_tx_ns = 0; 2881 goto out; /* Try the next interface */ 2882 } 2883 } 2884 2885 if (pkt_dev->last_ok || !pkt_dev->skb) { 2886 if ((++pkt_dev->clone_count >= pkt_dev->clone_skb) 2887 || (!pkt_dev->skb)) { 2888 /* build a new pkt */ 2889 if (pkt_dev->skb) 2890 kfree_skb(pkt_dev->skb); 2891 2892 pkt_dev->skb = fill_packet(odev, pkt_dev); 2893 if (pkt_dev->skb == NULL) { 2894 printk("pktgen: ERROR: couldn't allocate skb in fill_packet.\n"); 2895 schedule(); 2896 pkt_dev->clone_count--; /* back out increment, OOM */ 2897 goto out; 2898 } 2899 pkt_dev->allocated_skbs++; 2900 pkt_dev->clone_count = 0; /* reset counter */ 2901 } 2902 } 2903 2904 netif_tx_lock_bh(odev); 2905 if (!netif_queue_stopped(odev)) { 2906 2907 atomic_inc(&(pkt_dev->skb->users)); 2908 retry_now: 2909 ret = odev->hard_start_xmit(pkt_dev->skb, odev); 2910 if (likely(ret == NETDEV_TX_OK)) { 2911 pkt_dev->last_ok = 1; 2912 pkt_dev->sofar++; 2913 pkt_dev->seq_num++; 2914 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; 2915 2916 } else if (ret == NETDEV_TX_LOCKED 2917 && (odev->features & NETIF_F_LLTX)) { 2918 cpu_relax(); 2919 goto retry_now; 2920 } else { /* Retry it next time */ 2921 2922 atomic_dec(&(pkt_dev->skb->users)); 2923 2924 if (debug && net_ratelimit()) 2925 printk(KERN_INFO "pktgen: Hard xmit error\n"); 2926 2927 pkt_dev->errors++; 2928 pkt_dev->last_ok = 0; 2929 } 2930 2931 pkt_dev->next_tx_us = getCurUs(); 2932 pkt_dev->next_tx_ns = 0; 2933 2934 pkt_dev->next_tx_us += pkt_dev->delay_us; 2935 pkt_dev->next_tx_ns += pkt_dev->delay_ns; 2936 2937 if (pkt_dev->next_tx_ns > 1000) { 2938 pkt_dev->next_tx_us++; 2939 pkt_dev->next_tx_ns -= 1000; 2940 } 2941 } 2942 2943 else { /* Retry it next time */ 2944 pkt_dev->last_ok = 0; 2945 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 2946 pkt_dev->next_tx_ns = 0; 2947 } 2948 2949 netif_tx_unlock_bh(odev); 2950 2951 /* If pkt_dev->count is zero, then run forever */ 2952 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 2953 if (atomic_read(&(pkt_dev->skb->users)) != 1) { 2954 idle_start = getCurUs(); 2955 while (atomic_read(&(pkt_dev->skb->users)) != 1) { 2956 if 
(signal_pending(current)) { 2957 break; 2958 } 2959 schedule(); 2960 } 2961 pkt_dev->idle_acc += getCurUs() - idle_start; 2962 } 2963 2964 /* Done with this */ 2965 pktgen_stop_device(pkt_dev); 2966 if (pkt_dev->skb) 2967 kfree_skb(pkt_dev->skb); 2968 pkt_dev->skb = NULL; 2969 } 2970 out:; 2971 } 2972 2973 /* 2974 * Main loop of the thread goes here 2975 */ 2976 2977 static void pktgen_thread_worker(struct pktgen_thread *t) 2978 { 2979 DEFINE_WAIT(wait); 2980 struct pktgen_dev *pkt_dev = NULL; 2981 int cpu = t->cpu; 2982 sigset_t tmpsig; 2983 u32 max_before_softirq; 2984 u32 tx_since_softirq = 0; 2985 2986 daemonize("pktgen/%d", cpu); 2987 2988 /* Block all signals except SIGKILL, SIGSTOP and SIGTERM */ 2989 2990 spin_lock_irq(¤t->sighand->siglock); 2991 tmpsig = current->blocked; 2992 siginitsetinv(¤t->blocked, 2993 sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGTERM)); 2994 2995 recalc_sigpending(); 2996 spin_unlock_irq(¤t->sighand->siglock); 2997 2998 /* Migrate to the right CPU */ 2999 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 3000 if (smp_processor_id() != cpu) 3001 BUG(); 3002 3003 init_waitqueue_head(&t->queue); 3004 3005 t->control &= ~(T_TERMINATE); 3006 t->control &= ~(T_RUN); 3007 t->control &= ~(T_STOP); 3008 t->control &= ~(T_REMDEVALL); 3009 t->control &= ~(T_REMDEV); 3010 3011 t->pid = current->pid; 3012 3013 PG_DEBUG(printk("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid)); 3014 3015 max_before_softirq = t->max_before_softirq; 3016 3017 __set_current_state(TASK_INTERRUPTIBLE); 3018 mb(); 3019 3020 while (1) { 3021 3022 __set_current_state(TASK_RUNNING); 3023 3024 /* 3025 * Get next dev to xmit -- if any. 3026 */ 3027 3028 pkt_dev = next_to_run(t); 3029 3030 if (pkt_dev) { 3031 3032 pktgen_xmit(pkt_dev); 3033 3034 /* 3035 * We like to stay RUNNING but must also give 3036 * others fair share. 3037 */ 3038 3039 tx_since_softirq += pkt_dev->last_ok; 3040 3041 if (tx_since_softirq > max_before_softirq) { 3042 if (local_softirq_pending()) 3043 do_softirq(); 3044 tx_since_softirq = 0; 3045 } 3046 } else { 3047 prepare_to_wait(&(t->queue), &wait, TASK_INTERRUPTIBLE); 3048 schedule_timeout(HZ / 10); 3049 finish_wait(&(t->queue), &wait); 3050 } 3051 3052 /* 3053 * Back from sleep, either due to the timeout or signal. 3054 * We check if we have any "posted" work for us. 
3055 */ 3056 3057 if (t->control & T_TERMINATE || signal_pending(current)) 3058 /* we received a request to terminate ourself */ 3059 break; 3060 3061 if (t->control & T_STOP) { 3062 pktgen_stop(t); 3063 t->control &= ~(T_STOP); 3064 } 3065 3066 if (t->control & T_RUN) { 3067 pktgen_run(t); 3068 t->control &= ~(T_RUN); 3069 } 3070 3071 if (t->control & T_REMDEVALL) { 3072 pktgen_rem_all_ifs(t); 3073 t->control &= ~(T_REMDEVALL); 3074 } 3075 3076 if (t->control & T_REMDEV) { 3077 pktgen_rem_one_if(t); 3078 t->control &= ~(T_REMDEV); 3079 } 3080 3081 if (need_resched()) 3082 schedule(); 3083 } 3084 3085 PG_DEBUG(printk("pktgen: %s stopping all device\n", t->name)); 3086 pktgen_stop(t); 3087 3088 PG_DEBUG(printk("pktgen: %s removing all device\n", t->name)); 3089 pktgen_rem_all_ifs(t); 3090 3091 PG_DEBUG(printk("pktgen: %s removing thread.\n", t->name)); 3092 pktgen_rem_thread(t); 3093 3094 t->removed = 1; 3095 } 3096 3097 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 3098 const char *ifname) 3099 { 3100 struct pktgen_dev *p, *pkt_dev = NULL; 3101 if_lock(t); 3102 3103 list_for_each_entry(p, &t->if_list, list) 3104 if (strncmp(p->ifname, ifname, IFNAMSIZ) == 0) { 3105 pkt_dev = p; 3106 break; 3107 } 3108 3109 if_unlock(t); 3110 PG_DEBUG(printk("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev)); 3111 return pkt_dev; 3112 } 3113 3114 /* 3115 * Adds a dev at front of if_list. 3116 */ 3117 3118 static int add_dev_to_thread(struct pktgen_thread *t, 3119 struct pktgen_dev *pkt_dev) 3120 { 3121 int rv = 0; 3122 3123 if_lock(t); 3124 3125 if (pkt_dev->pg_thread) { 3126 printk("pktgen: ERROR: already assigned to a thread.\n"); 3127 rv = -EBUSY; 3128 goto out; 3129 } 3130 3131 list_add(&pkt_dev->list, &t->if_list); 3132 pkt_dev->pg_thread = t; 3133 pkt_dev->running = 0; 3134 3135 out: 3136 if_unlock(t); 3137 return rv; 3138 } 3139 3140 /* Called under thread lock */ 3141 3142 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) 3143 { 3144 struct pktgen_dev *pkt_dev; 3145 struct proc_dir_entry *pe; 3146 3147 /* We don't allow a device to be on several threads */ 3148 3149 pkt_dev = __pktgen_NN_threads(ifname, FIND); 3150 if (pkt_dev) { 3151 printk("pktgen: ERROR: interface already used.\n"); 3152 return -EBUSY; 3153 } 3154 3155 pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); 3156 if (!pkt_dev) 3157 return -ENOMEM; 3158 3159 pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state)); 3160 if (pkt_dev->flows == NULL) { 3161 kfree(pkt_dev); 3162 return -ENOMEM; 3163 } 3164 memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state)); 3165 3166 pkt_dev->removal_mark = 0; 3167 pkt_dev->min_pkt_size = ETH_ZLEN; 3168 pkt_dev->max_pkt_size = ETH_ZLEN; 3169 pkt_dev->nfrags = 0; 3170 pkt_dev->clone_skb = pg_clone_skb_d; 3171 pkt_dev->delay_us = pg_delay_d / 1000; 3172 pkt_dev->delay_ns = pg_delay_d % 1000; 3173 pkt_dev->count = pg_count_d; 3174 pkt_dev->sofar = 0; 3175 pkt_dev->udp_src_min = 9; /* sink port */ 3176 pkt_dev->udp_src_max = 9; 3177 pkt_dev->udp_dst_min = 9; 3178 pkt_dev->udp_dst_max = 9; 3179 3180 strncpy(pkt_dev->ifname, ifname, IFNAMSIZ); 3181 3182 if (!pktgen_setup_dev(pkt_dev)) { 3183 printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); 3184 if (pkt_dev->flows) 3185 vfree(pkt_dev->flows); 3186 kfree(pkt_dev); 3187 return -ENODEV; 3188 } 3189 3190 pe = create_proc_entry(ifname, 0600, pg_proc_dir); 3191 if (!pe) { 3192 printk("pktgen: cannot create %s/%s procfs entry.\n", 3193 PG_PROC_DIR, ifname); 3194 if (pkt_dev->flows) 3195 
vfree(pkt_dev->flows); 3196 kfree(pkt_dev); 3197 return -EINVAL; 3198 } 3199 pe->proc_fops = &pktgen_if_fops; 3200 pe->data = pkt_dev; 3201 3202 return add_dev_to_thread(t, pkt_dev); 3203 } 3204 3205 static struct pktgen_thread *__init pktgen_find_thread(const char *name) 3206 { 3207 struct pktgen_thread *t; 3208 3209 mutex_lock(&pktgen_thread_lock); 3210 3211 list_for_each_entry(t, &pktgen_threads, th_list) 3212 if (strcmp(t->name, name) == 0) { 3213 mutex_unlock(&pktgen_thread_lock); 3214 return t; 3215 } 3216 3217 mutex_unlock(&pktgen_thread_lock); 3218 return NULL; 3219 } 3220 3221 static int __init pktgen_create_thread(const char *name, int cpu) 3222 { 3223 int err; 3224 struct pktgen_thread *t = NULL; 3225 struct proc_dir_entry *pe; 3226 3227 if (strlen(name) > 31) { 3228 printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); 3229 return -EINVAL; 3230 } 3231 3232 if (pktgen_find_thread(name)) { 3233 printk("pktgen: ERROR: thread: %s already exists\n", name); 3234 return -EINVAL; 3235 } 3236 3237 t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); 3238 if (!t) { 3239 printk("pktgen: ERROR: out of memory, can't create new thread.\n"); 3240 return -ENOMEM; 3241 } 3242 3243 strcpy(t->name, name); 3244 spin_lock_init(&t->if_lock); 3245 t->cpu = cpu; 3246 3247 pe = create_proc_entry(t->name, 0600, pg_proc_dir); 3248 if (!pe) { 3249 printk("pktgen: cannot create %s/%s procfs entry.\n", 3250 PG_PROC_DIR, t->name); 3251 kfree(t); 3252 return -EINVAL; 3253 } 3254 3255 pe->proc_fops = &pktgen_thread_fops; 3256 pe->data = t; 3257 3258 INIT_LIST_HEAD(&t->if_list); 3259 3260 list_add_tail(&t->th_list, &pktgen_threads); 3261 3262 t->removed = 0; 3263 3264 err = kernel_thread((void *)pktgen_thread_worker, (void *)t, 3265 CLONE_FS | CLONE_FILES | CLONE_SIGHAND); 3266 if (err < 0) { 3267 printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu); 3268 remove_proc_entry(t->name, pg_proc_dir); 3269 list_del(&t->th_list); 3270 kfree(t); 3271 return err; 3272 } 3273 3274 return 0; 3275 } 3276 3277 /* 3278 * Removes a device from the thread if_list. 
3279 */ 3280 static void _rem_dev_from_if_list(struct pktgen_thread *t, 3281 struct pktgen_dev *pkt_dev) 3282 { 3283 struct list_head *q, *n; 3284 struct pktgen_dev *p; 3285 3286 list_for_each_safe(q, n, &t->if_list) { 3287 p = list_entry(q, struct pktgen_dev, list); 3288 if (p == pkt_dev) 3289 list_del(&p->list); 3290 } 3291 } 3292 3293 static int pktgen_remove_device(struct pktgen_thread *t, 3294 struct pktgen_dev *pkt_dev) 3295 { 3296 3297 PG_DEBUG(printk("pktgen: remove_device pkt_dev=%p\n", pkt_dev)); 3298 3299 if (pkt_dev->running) { 3300 printk("pktgen:WARNING: trying to remove a running interface, stopping it now.\n"); 3301 pktgen_stop_device(pkt_dev); 3302 } 3303 3304 /* Dis-associate from the interface */ 3305 3306 if (pkt_dev->odev) { 3307 dev_put(pkt_dev->odev); 3308 pkt_dev->odev = NULL; 3309 } 3310 3311 /* And update the thread if_list */ 3312 3313 _rem_dev_from_if_list(t, pkt_dev); 3314 3315 /* Clean up proc file system */ 3316 3317 remove_proc_entry(pkt_dev->ifname, pg_proc_dir); 3318 3319 if (pkt_dev->flows) 3320 vfree(pkt_dev->flows); 3321 kfree(pkt_dev); 3322 return 0; 3323 } 3324 3325 static int __init pg_init(void) 3326 { 3327 int cpu; 3328 struct proc_dir_entry *pe; 3329 3330 printk(version); 3331 3332 pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net); 3333 if (!pg_proc_dir) 3334 return -ENODEV; 3335 pg_proc_dir->owner = THIS_MODULE; 3336 3337 pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir); 3338 if (pe == NULL) { 3339 printk("pktgen: ERROR: cannot create %s procfs entry.\n", 3340 PGCTRL); 3341 proc_net_remove(PG_PROC_DIR); 3342 return -EINVAL; 3343 } 3344 3345 pe->proc_fops = &pktgen_fops; 3346 pe->data = NULL; 3347 3348 /* Register us to receive netdevice events */ 3349 register_netdevice_notifier(&pktgen_notifier_block); 3350 3351 for_each_online_cpu(cpu) { 3352 int err; 3353 char buf[30]; 3354 3355 sprintf(buf, "kpktgend_%i", cpu); 3356 err = pktgen_create_thread(buf, cpu); 3357 if (err) 3358 printk("pktgen: WARNING: Cannot create thread for cpu %d (%d)\n", 3359 cpu, err); 3360 } 3361 3362 if (list_empty(&pktgen_threads)) { 3363 printk("pktgen: ERROR: Initialization failed for all threads\n"); 3364 unregister_netdevice_notifier(&pktgen_notifier_block); 3365 remove_proc_entry(PGCTRL, pg_proc_dir); 3366 proc_net_remove(PG_PROC_DIR); 3367 return -ENODEV; 3368 } 3369 3370 return 0; 3371 } 3372 3373 static void __exit pg_cleanup(void) 3374 { 3375 struct pktgen_thread *t; 3376 struct list_head *q, *n; 3377 wait_queue_head_t queue; 3378 init_waitqueue_head(&queue); 3379 3380 /* Stop all interfaces & threads */ 3381 3382 list_for_each_safe(q, n, &pktgen_threads) { 3383 t = list_entry(q, struct pktgen_thread, th_list); 3384 t->control |= (T_TERMINATE); 3385 3386 wait_event_interruptible_timeout(queue, (t->removed == 1), HZ); 3387 } 3388 3389 /* Un-register us from receiving netdevice events */ 3390 unregister_netdevice_notifier(&pktgen_notifier_block); 3391 3392 /* Clean up proc file system */ 3393 remove_proc_entry(PGCTRL, pg_proc_dir); 3394 proc_net_remove(PG_PROC_DIR); 3395 } 3396 3397 module_init(pg_init); 3398 module_exit(pg_cleanup); 3399 3400 MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se"); 3401 MODULE_DESCRIPTION("Packet Generator tool"); 3402 MODULE_LICENSE("GPL"); 3403 module_param(pg_count_d, int, 0); 3404 module_param(pg_delay_d, int, 0); 3405 module_param(pg_clone_skb_d, int, 0); 3406 module_param(debug, int, 0); 3407