/*
 * Authors:
 * Copyright 2001, 2002 by Robert Olsson <robert.olsson@its.uu.se>
 *                             Uppsala University and
 *                             Swedish University of Agricultural Sciences
 *
 * Alexey Kuznetsov  <kuznet@ms2.inr.ac.ru>
 * Ben Greear <greearb@candelatech.com>
 * Jens Låås <jens.laas@data.slu.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *
 * A tool for loading the network with preconfigured packets.
 * The tool is implemented as a Linux module. Parameters are the output
 * device, delay (to hard_xmit), number of packets, and whether
 * to use multiple SKBs or just the same one.
 * pktgen uses the installed interface's output routine.
 *
 * Additional hacking by:
 *
 * Jens.Laas@data.slu.se
 * Improved by ANK. 010120.
 * Improved by ANK even more. 010212.
 * MAC address typo fixed. 010417 --ro
 * Integrated.  020301 --DaveM
 * Added multiskb option 020301 --DaveM
 * Scaling of results. 020417--sigurdur@linpro.no
 * Significant re-work of the module:
 *   * Convert to threaded model to more efficiently be able to transmit
 *     and receive on multiple interfaces at once.
 *   * Converted many counters to __u64 to allow longer runs.
 *   * Allow configuration of ranges, like min/max IP address, MACs,
 *     and UDP-ports, for both source and destination, and can
 *     set to use a random distribution or sequentially walk the range.
 *   * Can now change most values after starting.
 *   * Place 12-byte packet in UDP payload with magic number,
 *     sequence number, and timestamp.
 *   * Add receiver code that detects dropped pkts, re-ordered pkts, and
 *     latencies (with micro-second) precision.
 *   * Add IOCTL interface to easily get counters & configuration.
 *   --Ben Greear <greearb@candelatech.com>
 *
 * Renamed multiskb to clone_skb and cleaned up sending core for two distinct
 * skb modes. A clone_skb=0 mode for Ben "ranges" work and a clone_skb != 0
 * as a "fastpath" with a configurable number of clones after alloc's.
 * clone_skb=0 means all packets are allocated; this also means ranges, time
 * stamps etc. can be used. clone_skb=100 means 1 malloc is followed by 100
 * clones.
 *
 * Also moved to /proc/net/pktgen/
 * --ro
 *
 * Sept 10:  Fixed threading/locking.  Lots of bone-headed and more clever
 *           mistakes.  Also merged in DaveM's patch in the -pre6 patch.
 * --Ben Greear <greearb@candelatech.com>
 *
 * Integrated to 2.5.x 021029 --Lucio Maciel (luciomaciel@zipmail.com.br)
 *
 *
 * 021124 Finished major redesign and rewrite for new functionality.
 * See Documentation/networking/pktgen.txt for how to use this.
 *
 * The new operation:
 * For each CPU one thread/process is created at start. This thread checks
 * for running devices in the if_list and sends packets until count is 0.
 * The thread also checks thread->control, which is used for inter-process
 * communication; the controlling process "posts" operations to the threads
 * this way. The if_lock should be possible to remove when add/rem_device is
 * merged into this too.
 *
 * By design there should only be *one* "controlling" process. In practice
 * multiple write accesses give unpredictable results. Note that a "write"
 * to /proc returns a result code that should be read back by the "writer".
 * For practical use this should be no problem.
 *
 * Note: when adding devices to a specific CPU it is a good idea to also
 * assign /proc/irq/XX/smp_affinity so that TX interrupts get bound to the
 * same CPU.
 * --ro
 *
 * Fix refcount off by one if first packet fails, potential null deref,
 * memleak 030710- KJP
 *
 * First "ranges" functionality for ipv6 030726 --ro
 *
 * Included flow support.  030802 ANK.
 *
 * Fixed unaligned access on IA-64 Grant Grundler <grundler@parisc-linux.org>
 *
 * Remove if fix from added Harald Welte <laforge@netfilter.org> 040419
 * ia64 compilation fix from Aron Griffis <aron@hp.com> 040604
 *
 * New xmit() return, do_div and misc clean up by Stephen Hemminger
 * <shemminger@osdl.org> 040923
 *
 * Randy Dunlap fixed u64 printk compiler warning
 *
 * Remove FCS from BW calculation.  Lennert Buytenhek <buytenh@wantstofly.org>
 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
 *
 * Corrections from Nikolai Malykh (nmalykh@bilim.com)
 * Removed unused flags F_SET_SRCMAC & F_SET_SRCIP 041230
 *
 * interruptible_sleep_on_timeout() replaced Nishanth Aravamudan <nacc@us.ibm.com>
 * 050103
 *
 * MPLS support by Steven Whitehouse <steve@chygwyn.com>
 *
 */
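/*
 * Quick usage sketch (illustrative only; the thread and device names
 * kpktgend_0 and eth0 are examples -- see Documentation/networking/pktgen.txt
 * and the /proc handlers below for the authoritative command set):
 *
 *   echo "add_device eth0"            > /proc/net/pktgen/kpktgend_0
 *   echo "count 100000"               > /proc/net/pktgen/eth0
 *   echo "pkt_size 60"                > /proc/net/pktgen/eth0
 *   echo "dst 10.0.0.1"               > /proc/net/pktgen/eth0
 *   echo "dst_mac 00:11:22:33:44:55"  > /proc/net/pktgen/eth0
 *   echo "start"                      > /proc/net/pktgen/pgctrl
 *   cat /proc/net/pktgen/eth0             # read back parameters/results
 */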
#include <linux/sys.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/smp_lock.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/wait.h>
#include <linux/etherdevice.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/div64.h>		/* do_div */
#include <asm/timex.h>

#define VERSION  "pktgen v2.67: Packet Generator for packet performance testing.\n"

/* #define PG_DEBUG(a) a */
#define PG_DEBUG(a)

/* The buckets are exponential in 'width' */
#define LAT_BUCKETS_MAX 32
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16	/* This is the max label stack depth */
#define MPLS_STACK_BOTTOM __constant_htonl(0x00000100)

/* Device flag bits */
#define F_IPSRC_RND   (1<<0)	/* IP-Src Random  */
#define F_IPDST_RND   (1<<1)	/* IP-Dst Random  */
#define F_UDPSRC_RND  (1<<2)	/* UDP-Src Random */
#define F_UDPDST_RND  (1<<3)	/* UDP-Dst Random */
#define F_MACSRC_RND  (1<<4)	/* MAC-Src Random */
#define F_MACDST_RND  (1<<5)	/* MAC-Dst Random */
#define F_TXSIZE_RND  (1<<6)	/* Transmit size is random */
#define F_IPV6        (1<<7)	/* Interface in IPV6 Mode */
#define F_MPLS_RND    (1<<8)	/* Random MPLS labels */
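/*
 * These bits are toggled from user space through the per-device /proc file
 * (see pktgen_if_write() below).  For example, assuming a device already
 * added to a thread:
 *
 *   echo "flag UDPSRC_RND"  > /proc/net/pktgen/eth0    sets F_UDPSRC_RND
 *   echo "flag !UDPSRC_RND" > /proc/net/pktgen/eth0    clears it again
 */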
/* Thread control flag bits */
#define T_TERMINATE   (1<<0)
#define T_STOP        (1<<1)	/* Stop run */
#define T_RUN         (1<<2)	/* Start run */
#define T_REMDEVALL   (1<<3)	/* Remove all devs */
#define T_REMDEV      (1<<4)	/* Remove one dev */

/* If lock -- can be removed after some work */
#define   if_lock(t)           spin_lock(&(t->if_lock));
#define   if_unlock(t)           spin_unlock(&(t->if_lock));

/* Used to help with determining the pkts on receive */
#define PKTGEN_MAGIC 0xbe9be955
#define PG_PROC_DIR "pktgen"
#define PGCTRL	    "pgctrl"
static struct proc_dir_entry *pg_proc_dir = NULL;

#define MAX_CFLOWS  65536

struct flow_state {
	__u32 cur_daddr;
	int count;
};

struct pktgen_dev {

	/*
	 * Try to keep frequent/infrequent used vars. separated.
	 */

	char ifname[IFNAMSIZ];
	char result[512];

	struct pktgen_thread *pg_thread;	/* the owner */
	struct list_head list;	/* Used for chaining in the thread's run-queue */

	int running;		/* if this changes to false, the test will stop */

	/* If min != max, then we will either do a linear iteration, or
	 * we will do a random selection from within the range.
	 */
	__u32 flags;
	int removal_mark;	/* non-zero => the device is marked for
				 * removal by worker thread */

	int min_pkt_size;	/* = ETH_ZLEN; */
	int max_pkt_size;	/* = ETH_ZLEN; */
	int nfrags;
	__u32 delay_us;		/* Default delay */
	__u32 delay_ns;
	__u64 count;		/* Default No packets to send */
	__u64 sofar;		/* How many pkts we've sent so far */
	__u64 tx_bytes;		/* How many bytes we've transmitted */
	__u64 errors;		/* Errors when trying to transmit, pkts will be re-sent */

	/* runtime counters relating to clone_skb */
	__u64 next_tx_us;	/* timestamp of when to tx next */
	__u32 next_tx_ns;

	__u64 allocated_skbs;
	__u32 clone_count;
	int last_ok;		/* Was last skb sent?
				 * Or a failed transmit of some sort?  This will keep
				 * sequence numbers in order, for example.
				 */
	__u64 started_at;	/* micro-seconds */
	__u64 stopped_at;	/* micro-seconds */
	__u64 idle_acc;		/* micro-seconds */
	__u32 seq_num;

	int clone_skb;		/* Use multiple SKBs during packet gen.  If this number
				 * is greater than 1, then that many copies of the same
				 * packet will be sent before a new packet is allocated.
				 * For instance, if you want to send 1024 identical packets
				 * before creating a new packet, set clone_skb to 1024.
				 */

	char dst_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char dst_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_min[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */
	char src_max[IP_NAME_SZ];	/* IP, ie 1.2.3.4 */

	struct in6_addr in6_saddr;
	struct in6_addr in6_daddr;
	struct in6_addr cur_in6_daddr;
	struct in6_addr cur_in6_saddr;
	/* For ranges */
	struct in6_addr min_in6_daddr;
	struct in6_addr max_in6_daddr;
	struct in6_addr min_in6_saddr;
	struct in6_addr max_in6_saddr;

	/* If we're doing ranges, random or incremental, then this
	 * defines the min/max for those ranges.
	 */
	__u32 saddr_min;	/* inclusive, source IP address */
	__u32 saddr_max;	/* exclusive, source IP address */
	__u32 daddr_min;	/* inclusive, dest IP address */
	__u32 daddr_max;	/* exclusive, dest IP address */

	__u16 udp_src_min;	/* inclusive, source UDP port */
	__u16 udp_src_max;	/* exclusive, source UDP port */
	__u16 udp_dst_min;	/* inclusive, dest UDP port */
	__u16 udp_dst_max;	/* exclusive, dest UDP port */

	/* MPLS */
	unsigned nr_labels;	/* Depth of stack, 0 = no MPLS */
	__be32 labels[MAX_MPLS_LABELS];

	__u32 src_mac_count;	/* How many MACs to iterate through */
	__u32 dst_mac_count;	/* How many MACs to iterate through */

	unsigned char dst_mac[ETH_ALEN];
	unsigned char src_mac[ETH_ALEN];

	__u32 cur_dst_mac_offset;
	__u32 cur_src_mac_offset;
	__u32 cur_saddr;
	__u32 cur_daddr;
	__u16 cur_udp_dst;
	__u16 cur_udp_src;
	__u32 cur_pkt_size;

	__u8 hh[14];
	/* = {
	   0x00, 0x80, 0xC8, 0x79, 0xB3, 0xCB,

	   We fill in SRC address later
	   0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	   0x08, 0x00
	   };
	 */
	__u16 pad;		/* pad out the hh struct to an even 16 bytes */

	struct sk_buff *skb;	/* skb we are to transmit next, mainly used for when we
				 * are transmitting the same one multiple times
				 */
	struct net_device *odev;	/* The out-going device.  Note that the device should
					 * have its pg_info pointer pointing back to this
					 * device.  This will be set when the user specifies
					 * the out-going device name (not when the inject is
					 * started as it used to do.)
					 */
	struct flow_state *flows;
	unsigned cflows;	/* Concurrent flows (config) */
	unsigned lflow;		/* Flow length  (config) */
	unsigned nflows;	/* accumulated flows (stats) */
};

struct pktgen_hdr {
	__u32 pgh_magic;
	__u32 seq_num;
	__u32 tv_sec;
	__u32 tv_usec;
};

struct pktgen_thread {
	spinlock_t if_lock;
	struct list_head if_list;	/* All devices here */
	struct list_head th_list;
	int removed;
	char name[32];
	char result[512];
	u32 max_before_softirq;	/* We'll call do_softirq to prevent starvation. */

	/* Field for thread to receive "posted" events terminate, stop ifs etc. */

	u32 control;
	int pid;
	int cpu;

	wait_queue_head_t queue;
};

#define REMOVE 1
#define FIND   0

/*  This code works around the fact that do_div cannot handle two 64-bit
    numbers, and regular 64-bit division doesn't work on x86 kernels.
    --Ben
*/

#define PG_DIV 0

/* This was emailed to LKML by: Chris Caputo <ccaputo@alt.net>
 * Function copied/adapted/optimized from:
 *
 *  nemesis.sourceforge.net/browse/lib/static/intmath/ix86/intmath.c.html
 *
 * Copyright 1994, University of Cambridge Computer Laboratory
 * All Rights Reserved.
 *
 */
static inline s64 divremdi3(s64 x, s64 y, int type)
{
	u64 a = (x < 0) ? -x : x;
	u64 b = (y < 0) ? -y : y;
	u64 res = 0, d = 1;

	if (b > 0) {
		while (b < a) {
			b <<= 1;
			d <<= 1;
		}
	}

	do {
		if (a >= b) {
			a -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	}
	while (d);

	if (PG_DIV == type) {
		return (((x ^ y) & (1ll << 63)) == 0) ? res : -(s64) res;
	} else {
		return ((x & (1ll << 63)) == 0) ? a : -(s64) a;
	}
}
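/*
 * Note on divremdi3(): with type == PG_DIV the signed quotient is returned;
 * any other type value yields the remainder.  E.g. divremdi3(7, 2, PG_DIV)
 * is 3 and divremdi3(7, 2, 1) is 1.  pg_div64() below falls back to this
 * helper on 32-bit architectures where do_div() cannot take a 64-bit base.
 */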
/* End of hacks to deal with 64-bit math on x86 */

/** Convert to milliseconds */
static inline __u64 tv_to_ms(const struct timeval *tv)
{
	__u64 ms = tv->tv_usec / 1000;
	ms += (__u64) tv->tv_sec * (__u64) 1000;
	return ms;
}

/** Convert to micro-seconds */
static inline __u64 tv_to_us(const struct timeval *tv)
{
	__u64 us = tv->tv_usec;
	us += (__u64) tv->tv_sec * (__u64) 1000000;
	return us;
}

static inline __u64 pg_div(__u64 n, __u32 base)
{
	__u64 tmp = n;
	do_div(tmp, base);
	/* printk("pktgen: pg_div, n: %llu  base: %d  rv: %llu\n",
	   n, base, tmp); */
	return tmp;
}

static inline __u64 pg_div64(__u64 n, __u64 base)
{
	__u64 tmp = n;
/*
 * How do we know if the architecture we are running on
 * supports division with 64 bit base?
 *
 */
#if defined(__sparc_v9__) || defined(__powerpc64__) || defined(__alpha__) || defined(__x86_64__) || defined(__ia64__)

	do_div(tmp, base);
#else
	tmp = divremdi3(n, base, PG_DIV);
#endif
	return tmp;
}

static inline u32 pktgen_random(void)
{
#if 0
	__u32 n;
	get_random_bytes(&n, 4);
	return n;
#else
	return net_random();
#endif
}

static inline __u64 getCurMs(void)
{
	struct timeval tv;
	do_gettimeofday(&tv);
	return tv_to_ms(&tv);
}

static inline __u64 getCurUs(void)
{
	struct timeval tv;
	do_gettimeofday(&tv);
	return tv_to_us(&tv);
}

static inline __u64 tv_diff(const struct timeval *a, const struct timeval *b)
{
	return tv_to_us(a) - tv_to_us(b);
}

/* old include end */

static char version[] __initdata = VERSION;

static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
					  const char *ifname);
static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(void);
static void pktgen_stop_all_threads_ifs(void);
static int pktgen_stop_device(struct pktgen_dev *pkt_dev);
static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
static int pktgen_mark_device(const char *ifname);
static unsigned int scan_ip6(const char *s, char ip[16]);
static unsigned int fmt_ip6(char *s, const char ip[16]);

/* Module parameters, defaults.
*/ 498 static int pg_count_d = 1000; /* 1000 pkts by default */ 499 static int pg_delay_d; 500 static int pg_clone_skb_d; 501 static int debug; 502 503 static DEFINE_MUTEX(pktgen_thread_lock); 504 static LIST_HEAD(pktgen_threads); 505 506 static struct notifier_block pktgen_notifier_block = { 507 .notifier_call = pktgen_device_event, 508 }; 509 510 /* 511 * /proc handling functions 512 * 513 */ 514 515 static int pgctrl_show(struct seq_file *seq, void *v) 516 { 517 seq_puts(seq, VERSION); 518 return 0; 519 } 520 521 static ssize_t pgctrl_write(struct file *file, const char __user * buf, 522 size_t count, loff_t * ppos) 523 { 524 int err = 0; 525 char data[128]; 526 527 if (!capable(CAP_NET_ADMIN)) { 528 err = -EPERM; 529 goto out; 530 } 531 532 if (count > sizeof(data)) 533 count = sizeof(data); 534 535 if (copy_from_user(data, buf, count)) { 536 err = -EFAULT; 537 goto out; 538 } 539 data[count - 1] = 0; /* Make string */ 540 541 if (!strcmp(data, "stop")) 542 pktgen_stop_all_threads_ifs(); 543 544 else if (!strcmp(data, "start")) 545 pktgen_run_all_threads(); 546 547 else 548 printk("pktgen: Unknown command: %s\n", data); 549 550 err = count; 551 552 out: 553 return err; 554 } 555 556 static int pgctrl_open(struct inode *inode, struct file *file) 557 { 558 return single_open(file, pgctrl_show, PDE(inode)->data); 559 } 560 561 static struct file_operations pktgen_fops = { 562 .owner = THIS_MODULE, 563 .open = pgctrl_open, 564 .read = seq_read, 565 .llseek = seq_lseek, 566 .write = pgctrl_write, 567 .release = single_release, 568 }; 569 570 static int pktgen_if_show(struct seq_file *seq, void *v) 571 { 572 int i; 573 struct pktgen_dev *pkt_dev = seq->private; 574 __u64 sa; 575 __u64 stopped; 576 __u64 now = getCurUs(); 577 578 seq_printf(seq, 579 "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 580 (unsigned long long)pkt_dev->count, pkt_dev->min_pkt_size, 581 pkt_dev->max_pkt_size); 582 583 seq_printf(seq, 584 " frags: %d delay: %u clone_skb: %d ifname: %s\n", 585 pkt_dev->nfrags, 586 1000 * pkt_dev->delay_us + pkt_dev->delay_ns, 587 pkt_dev->clone_skb, pkt_dev->ifname); 588 589 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, 590 pkt_dev->lflow); 591 592 if (pkt_dev->flags & F_IPV6) { 593 char b1[128], b2[128], b3[128]; 594 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); 595 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); 596 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); 597 seq_printf(seq, 598 " saddr: %s min_saddr: %s max_saddr: %s\n", b1, 599 b2, b3); 600 601 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); 602 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); 603 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); 604 seq_printf(seq, 605 " daddr: %s min_daddr: %s max_daddr: %s\n", b1, 606 b2, b3); 607 608 } else 609 seq_printf(seq, 610 " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", 611 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, 612 pkt_dev->src_max); 613 614 seq_puts(seq, " src_mac: "); 615 616 if (is_zero_ether_addr(pkt_dev->src_mac)) 617 for (i = 0; i < 6; i++) 618 seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], 619 i == 5 ? " " : ":"); 620 else 621 for (i = 0; i < 6; i++) 622 seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], 623 i == 5 ? " " : ":"); 624 625 seq_printf(seq, "dst_mac: "); 626 for (i = 0; i < 6; i++) 627 seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], 628 i == 5 ? 
"\n" : ":"); 629 630 seq_printf(seq, 631 " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", 632 pkt_dev->udp_src_min, pkt_dev->udp_src_max, 633 pkt_dev->udp_dst_min, pkt_dev->udp_dst_max); 634 635 seq_printf(seq, 636 " src_mac_count: %d dst_mac_count: %d\n", 637 pkt_dev->src_mac_count, pkt_dev->dst_mac_count); 638 639 if (pkt_dev->nr_labels) { 640 unsigned i; 641 seq_printf(seq, " mpls: "); 642 for(i = 0; i < pkt_dev->nr_labels; i++) 643 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 644 i == pkt_dev->nr_labels-1 ? "\n" : ", "); 645 } 646 647 seq_printf(seq, " Flags: "); 648 649 if (pkt_dev->flags & F_IPV6) 650 seq_printf(seq, "IPV6 "); 651 652 if (pkt_dev->flags & F_IPSRC_RND) 653 seq_printf(seq, "IPSRC_RND "); 654 655 if (pkt_dev->flags & F_IPDST_RND) 656 seq_printf(seq, "IPDST_RND "); 657 658 if (pkt_dev->flags & F_TXSIZE_RND) 659 seq_printf(seq, "TXSIZE_RND "); 660 661 if (pkt_dev->flags & F_UDPSRC_RND) 662 seq_printf(seq, "UDPSRC_RND "); 663 664 if (pkt_dev->flags & F_UDPDST_RND) 665 seq_printf(seq, "UDPDST_RND "); 666 667 if (pkt_dev->flags & F_MPLS_RND) 668 seq_printf(seq, "MPLS_RND "); 669 670 if (pkt_dev->flags & F_MACSRC_RND) 671 seq_printf(seq, "MACSRC_RND "); 672 673 if (pkt_dev->flags & F_MACDST_RND) 674 seq_printf(seq, "MACDST_RND "); 675 676 seq_puts(seq, "\n"); 677 678 sa = pkt_dev->started_at; 679 stopped = pkt_dev->stopped_at; 680 if (pkt_dev->running) 681 stopped = now; /* not really stopped, more like last-running-at */ 682 683 seq_printf(seq, 684 "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", 685 (unsigned long long)pkt_dev->sofar, 686 (unsigned long long)pkt_dev->errors, (unsigned long long)sa, 687 (unsigned long long)stopped, 688 (unsigned long long)pkt_dev->idle_acc); 689 690 seq_printf(seq, 691 " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", 692 pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, 693 pkt_dev->cur_src_mac_offset); 694 695 if (pkt_dev->flags & F_IPV6) { 696 char b1[128], b2[128]; 697 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); 698 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); 699 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1); 700 } else 701 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", 702 pkt_dev->cur_saddr, pkt_dev->cur_daddr); 703 704 seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", 705 pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); 706 707 seq_printf(seq, " flows: %u\n", pkt_dev->nflows); 708 709 if (pkt_dev->result[0]) 710 seq_printf(seq, "Result: %s\n", pkt_dev->result); 711 else 712 seq_printf(seq, "Result: Idle\n"); 713 714 return 0; 715 } 716 717 718 static int hex32_arg(const char __user *user_buffer, __u32 *num) 719 { 720 int i = 0; 721 *num = 0; 722 723 for(; i < 8; i++) { 724 char c; 725 *num <<= 4; 726 if (get_user(c, &user_buffer[i])) 727 return -EFAULT; 728 if ((c >= '0') && (c <= '9')) 729 *num |= c - '0'; 730 else if ((c >= 'a') && (c <= 'f')) 731 *num |= c - 'a' + 10; 732 else if ((c >= 'A') && (c <= 'F')) 733 *num |= c - 'A' + 10; 734 else 735 break; 736 } 737 return i; 738 } 739 740 static int count_trail_chars(const char __user * user_buffer, 741 unsigned int maxlen) 742 { 743 int i; 744 745 for (i = 0; i < maxlen; i++) { 746 char c; 747 if (get_user(c, &user_buffer[i])) 748 return -EFAULT; 749 switch (c) { 750 case '\"': 751 case '\n': 752 case '\r': 753 case '\t': 754 case ' ': 755 case '=': 756 break; 757 default: 758 goto done; 759 }; 760 } 761 done: 762 return i; 763 } 764 765 static unsigned long num_arg(const char 
__user * user_buffer, 766 unsigned long maxlen, unsigned long *num) 767 { 768 int i = 0; 769 *num = 0; 770 771 for (; i < maxlen; i++) { 772 char c; 773 if (get_user(c, &user_buffer[i])) 774 return -EFAULT; 775 if ((c >= '0') && (c <= '9')) { 776 *num *= 10; 777 *num += c - '0'; 778 } else 779 break; 780 } 781 return i; 782 } 783 784 static int strn_len(const char __user * user_buffer, unsigned int maxlen) 785 { 786 int i = 0; 787 788 for (; i < maxlen; i++) { 789 char c; 790 if (get_user(c, &user_buffer[i])) 791 return -EFAULT; 792 switch (c) { 793 case '\"': 794 case '\n': 795 case '\r': 796 case '\t': 797 case ' ': 798 goto done_str; 799 break; 800 default: 801 break; 802 }; 803 } 804 done_str: 805 return i; 806 } 807 808 static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) 809 { 810 unsigned n = 0; 811 char c; 812 ssize_t i = 0; 813 int len; 814 815 pkt_dev->nr_labels = 0; 816 do { 817 __u32 tmp; 818 len = hex32_arg(&buffer[i], &tmp); 819 if (len <= 0) 820 return len; 821 pkt_dev->labels[n] = htonl(tmp); 822 if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) 823 pkt_dev->flags |= F_MPLS_RND; 824 i += len; 825 if (get_user(c, &buffer[i])) 826 return -EFAULT; 827 i++; 828 n++; 829 if (n >= MAX_MPLS_LABELS) 830 return -E2BIG; 831 } while(c == ','); 832 833 pkt_dev->nr_labels = n; 834 return i; 835 } 836 837 static ssize_t pktgen_if_write(struct file *file, 838 const char __user * user_buffer, size_t count, 839 loff_t * offset) 840 { 841 struct seq_file *seq = (struct seq_file *)file->private_data; 842 struct pktgen_dev *pkt_dev = seq->private; 843 int i = 0, max, len; 844 char name[16], valstr[32]; 845 unsigned long value = 0; 846 char *pg_result = NULL; 847 int tmp = 0; 848 char buf[128]; 849 850 pg_result = &(pkt_dev->result[0]); 851 852 if (count < 1) { 853 printk("pktgen: wrong command format\n"); 854 return -EINVAL; 855 } 856 857 max = count - i; 858 tmp = count_trail_chars(&user_buffer[i], max); 859 if (tmp < 0) { 860 printk("pktgen: illegal format\n"); 861 return tmp; 862 } 863 i += tmp; 864 865 /* Read variable name */ 866 867 len = strn_len(&user_buffer[i], sizeof(name) - 1); 868 if (len < 0) { 869 return len; 870 } 871 memset(name, 0, sizeof(name)); 872 if (copy_from_user(name, &user_buffer[i], len)) 873 return -EFAULT; 874 i += len; 875 876 max = count - i; 877 len = count_trail_chars(&user_buffer[i], max); 878 if (len < 0) 879 return len; 880 881 i += len; 882 883 if (debug) { 884 char tb[count + 1]; 885 if (copy_from_user(tb, user_buffer, count)) 886 return -EFAULT; 887 tb[count] = 0; 888 printk("pktgen: %s,%lu buffer -:%s:-\n", name, 889 (unsigned long)count, tb); 890 } 891 892 if (!strcmp(name, "min_pkt_size")) { 893 len = num_arg(&user_buffer[i], 10, &value); 894 if (len < 0) { 895 return len; 896 } 897 i += len; 898 if (value < 14 + 20 + 8) 899 value = 14 + 20 + 8; 900 if (value != pkt_dev->min_pkt_size) { 901 pkt_dev->min_pkt_size = value; 902 pkt_dev->cur_pkt_size = value; 903 } 904 sprintf(pg_result, "OK: min_pkt_size=%u", 905 pkt_dev->min_pkt_size); 906 return count; 907 } 908 909 if (!strcmp(name, "max_pkt_size")) { 910 len = num_arg(&user_buffer[i], 10, &value); 911 if (len < 0) { 912 return len; 913 } 914 i += len; 915 if (value < 14 + 20 + 8) 916 value = 14 + 20 + 8; 917 if (value != pkt_dev->max_pkt_size) { 918 pkt_dev->max_pkt_size = value; 919 pkt_dev->cur_pkt_size = value; 920 } 921 sprintf(pg_result, "OK: max_pkt_size=%u", 922 pkt_dev->max_pkt_size); 923 return count; 924 } 925 926 /* Shortcut for min = max */ 927 928 if (!strcmp(name, 
"pkt_size")) { 929 len = num_arg(&user_buffer[i], 10, &value); 930 if (len < 0) { 931 return len; 932 } 933 i += len; 934 if (value < 14 + 20 + 8) 935 value = 14 + 20 + 8; 936 if (value != pkt_dev->min_pkt_size) { 937 pkt_dev->min_pkt_size = value; 938 pkt_dev->max_pkt_size = value; 939 pkt_dev->cur_pkt_size = value; 940 } 941 sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size); 942 return count; 943 } 944 945 if (!strcmp(name, "debug")) { 946 len = num_arg(&user_buffer[i], 10, &value); 947 if (len < 0) { 948 return len; 949 } 950 i += len; 951 debug = value; 952 sprintf(pg_result, "OK: debug=%u", debug); 953 return count; 954 } 955 956 if (!strcmp(name, "frags")) { 957 len = num_arg(&user_buffer[i], 10, &value); 958 if (len < 0) { 959 return len; 960 } 961 i += len; 962 pkt_dev->nfrags = value; 963 sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags); 964 return count; 965 } 966 if (!strcmp(name, "delay")) { 967 len = num_arg(&user_buffer[i], 10, &value); 968 if (len < 0) { 969 return len; 970 } 971 i += len; 972 if (value == 0x7FFFFFFF) { 973 pkt_dev->delay_us = 0x7FFFFFFF; 974 pkt_dev->delay_ns = 0; 975 } else { 976 pkt_dev->delay_us = value / 1000; 977 pkt_dev->delay_ns = value % 1000; 978 } 979 sprintf(pg_result, "OK: delay=%u", 980 1000 * pkt_dev->delay_us + pkt_dev->delay_ns); 981 return count; 982 } 983 if (!strcmp(name, "udp_src_min")) { 984 len = num_arg(&user_buffer[i], 10, &value); 985 if (len < 0) { 986 return len; 987 } 988 i += len; 989 if (value != pkt_dev->udp_src_min) { 990 pkt_dev->udp_src_min = value; 991 pkt_dev->cur_udp_src = value; 992 } 993 sprintf(pg_result, "OK: udp_src_min=%u", pkt_dev->udp_src_min); 994 return count; 995 } 996 if (!strcmp(name, "udp_dst_min")) { 997 len = num_arg(&user_buffer[i], 10, &value); 998 if (len < 0) { 999 return len; 1000 } 1001 i += len; 1002 if (value != pkt_dev->udp_dst_min) { 1003 pkt_dev->udp_dst_min = value; 1004 pkt_dev->cur_udp_dst = value; 1005 } 1006 sprintf(pg_result, "OK: udp_dst_min=%u", pkt_dev->udp_dst_min); 1007 return count; 1008 } 1009 if (!strcmp(name, "udp_src_max")) { 1010 len = num_arg(&user_buffer[i], 10, &value); 1011 if (len < 0) { 1012 return len; 1013 } 1014 i += len; 1015 if (value != pkt_dev->udp_src_max) { 1016 pkt_dev->udp_src_max = value; 1017 pkt_dev->cur_udp_src = value; 1018 } 1019 sprintf(pg_result, "OK: udp_src_max=%u", pkt_dev->udp_src_max); 1020 return count; 1021 } 1022 if (!strcmp(name, "udp_dst_max")) { 1023 len = num_arg(&user_buffer[i], 10, &value); 1024 if (len < 0) { 1025 return len; 1026 } 1027 i += len; 1028 if (value != pkt_dev->udp_dst_max) { 1029 pkt_dev->udp_dst_max = value; 1030 pkt_dev->cur_udp_dst = value; 1031 } 1032 sprintf(pg_result, "OK: udp_dst_max=%u", pkt_dev->udp_dst_max); 1033 return count; 1034 } 1035 if (!strcmp(name, "clone_skb")) { 1036 len = num_arg(&user_buffer[i], 10, &value); 1037 if (len < 0) { 1038 return len; 1039 } 1040 i += len; 1041 pkt_dev->clone_skb = value; 1042 1043 sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb); 1044 return count; 1045 } 1046 if (!strcmp(name, "count")) { 1047 len = num_arg(&user_buffer[i], 10, &value); 1048 if (len < 0) { 1049 return len; 1050 } 1051 i += len; 1052 pkt_dev->count = value; 1053 sprintf(pg_result, "OK: count=%llu", 1054 (unsigned long long)pkt_dev->count); 1055 return count; 1056 } 1057 if (!strcmp(name, "src_mac_count")) { 1058 len = num_arg(&user_buffer[i], 10, &value); 1059 if (len < 0) { 1060 return len; 1061 } 1062 i += len; 1063 if (pkt_dev->src_mac_count != value) { 1064 pkt_dev->src_mac_count = 
value; 1065 pkt_dev->cur_src_mac_offset = 0; 1066 } 1067 sprintf(pg_result, "OK: src_mac_count=%d", 1068 pkt_dev->src_mac_count); 1069 return count; 1070 } 1071 if (!strcmp(name, "dst_mac_count")) { 1072 len = num_arg(&user_buffer[i], 10, &value); 1073 if (len < 0) { 1074 return len; 1075 } 1076 i += len; 1077 if (pkt_dev->dst_mac_count != value) { 1078 pkt_dev->dst_mac_count = value; 1079 pkt_dev->cur_dst_mac_offset = 0; 1080 } 1081 sprintf(pg_result, "OK: dst_mac_count=%d", 1082 pkt_dev->dst_mac_count); 1083 return count; 1084 } 1085 if (!strcmp(name, "flag")) { 1086 char f[32]; 1087 memset(f, 0, 32); 1088 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1089 if (len < 0) { 1090 return len; 1091 } 1092 if (copy_from_user(f, &user_buffer[i], len)) 1093 return -EFAULT; 1094 i += len; 1095 if (strcmp(f, "IPSRC_RND") == 0) 1096 pkt_dev->flags |= F_IPSRC_RND; 1097 1098 else if (strcmp(f, "!IPSRC_RND") == 0) 1099 pkt_dev->flags &= ~F_IPSRC_RND; 1100 1101 else if (strcmp(f, "TXSIZE_RND") == 0) 1102 pkt_dev->flags |= F_TXSIZE_RND; 1103 1104 else if (strcmp(f, "!TXSIZE_RND") == 0) 1105 pkt_dev->flags &= ~F_TXSIZE_RND; 1106 1107 else if (strcmp(f, "IPDST_RND") == 0) 1108 pkt_dev->flags |= F_IPDST_RND; 1109 1110 else if (strcmp(f, "!IPDST_RND") == 0) 1111 pkt_dev->flags &= ~F_IPDST_RND; 1112 1113 else if (strcmp(f, "UDPSRC_RND") == 0) 1114 pkt_dev->flags |= F_UDPSRC_RND; 1115 1116 else if (strcmp(f, "!UDPSRC_RND") == 0) 1117 pkt_dev->flags &= ~F_UDPSRC_RND; 1118 1119 else if (strcmp(f, "UDPDST_RND") == 0) 1120 pkt_dev->flags |= F_UDPDST_RND; 1121 1122 else if (strcmp(f, "!UDPDST_RND") == 0) 1123 pkt_dev->flags &= ~F_UDPDST_RND; 1124 1125 else if (strcmp(f, "MACSRC_RND") == 0) 1126 pkt_dev->flags |= F_MACSRC_RND; 1127 1128 else if (strcmp(f, "!MACSRC_RND") == 0) 1129 pkt_dev->flags &= ~F_MACSRC_RND; 1130 1131 else if (strcmp(f, "MACDST_RND") == 0) 1132 pkt_dev->flags |= F_MACDST_RND; 1133 1134 else if (strcmp(f, "!MACDST_RND") == 0) 1135 pkt_dev->flags &= ~F_MACDST_RND; 1136 1137 else if (strcmp(f, "MPLS_RND") == 0) 1138 pkt_dev->flags |= F_MPLS_RND; 1139 1140 else if (strcmp(f, "!MPLS_RND") == 0) 1141 pkt_dev->flags &= ~F_MPLS_RND; 1142 1143 else { 1144 sprintf(pg_result, 1145 "Flag -:%s:- unknown\nAvailable flags, (prepend ! 
to un-set flag):\n%s", 1146 f, 1147 "IPSRC_RND, IPDST_RND, TXSIZE_RND, UDPSRC_RND, UDPDST_RND, MACSRC_RND, MACDST_RND\n"); 1148 return count; 1149 } 1150 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1151 return count; 1152 } 1153 if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { 1154 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); 1155 if (len < 0) { 1156 return len; 1157 } 1158 1159 if (copy_from_user(buf, &user_buffer[i], len)) 1160 return -EFAULT; 1161 buf[len] = 0; 1162 if (strcmp(buf, pkt_dev->dst_min) != 0) { 1163 memset(pkt_dev->dst_min, 0, sizeof(pkt_dev->dst_min)); 1164 strncpy(pkt_dev->dst_min, buf, len); 1165 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); 1166 pkt_dev->cur_daddr = pkt_dev->daddr_min; 1167 } 1168 if (debug) 1169 printk("pktgen: dst_min set to: %s\n", 1170 pkt_dev->dst_min); 1171 i += len; 1172 sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); 1173 return count; 1174 } 1175 if (!strcmp(name, "dst_max")) { 1176 len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); 1177 if (len < 0) { 1178 return len; 1179 } 1180 1181 if (copy_from_user(buf, &user_buffer[i], len)) 1182 return -EFAULT; 1183 1184 buf[len] = 0; 1185 if (strcmp(buf, pkt_dev->dst_max) != 0) { 1186 memset(pkt_dev->dst_max, 0, sizeof(pkt_dev->dst_max)); 1187 strncpy(pkt_dev->dst_max, buf, len); 1188 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); 1189 pkt_dev->cur_daddr = pkt_dev->daddr_max; 1190 } 1191 if (debug) 1192 printk("pktgen: dst_max set to: %s\n", 1193 pkt_dev->dst_max); 1194 i += len; 1195 sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); 1196 return count; 1197 } 1198 if (!strcmp(name, "dst6")) { 1199 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1200 if (len < 0) 1201 return len; 1202 1203 pkt_dev->flags |= F_IPV6; 1204 1205 if (copy_from_user(buf, &user_buffer[i], len)) 1206 return -EFAULT; 1207 buf[len] = 0; 1208 1209 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1210 fmt_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1211 1212 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); 1213 1214 if (debug) 1215 printk("pktgen: dst6 set to: %s\n", buf); 1216 1217 i += len; 1218 sprintf(pg_result, "OK: dst6=%s", buf); 1219 return count; 1220 } 1221 if (!strcmp(name, "dst6_min")) { 1222 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1223 if (len < 0) 1224 return len; 1225 1226 pkt_dev->flags |= F_IPV6; 1227 1228 if (copy_from_user(buf, &user_buffer[i], len)) 1229 return -EFAULT; 1230 buf[len] = 0; 1231 1232 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1233 fmt_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1234 1235 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, 1236 &pkt_dev->min_in6_daddr); 1237 if (debug) 1238 printk("pktgen: dst6_min set to: %s\n", buf); 1239 1240 i += len; 1241 sprintf(pg_result, "OK: dst6_min=%s", buf); 1242 return count; 1243 } 1244 if (!strcmp(name, "dst6_max")) { 1245 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1246 if (len < 0) 1247 return len; 1248 1249 pkt_dev->flags |= F_IPV6; 1250 1251 if (copy_from_user(buf, &user_buffer[i], len)) 1252 return -EFAULT; 1253 buf[len] = 0; 1254 1255 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1256 fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1257 1258 if (debug) 1259 printk("pktgen: dst6_max set to: %s\n", buf); 1260 1261 i += len; 1262 sprintf(pg_result, "OK: dst6_max=%s", buf); 1263 return count; 1264 } 1265 if (!strcmp(name, "src6")) { 1266 len = strn_len(&user_buffer[i], sizeof(buf) - 1); 1267 if (len < 0) 1268 return len; 1269 1270 pkt_dev->flags |= F_IPV6; 1271 1272 if 
(copy_from_user(buf, &user_buffer[i], len)) 1273 return -EFAULT; 1274 buf[len] = 0; 1275 1276 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1277 fmt_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1278 1279 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); 1280 1281 if (debug) 1282 printk("pktgen: src6 set to: %s\n", buf); 1283 1284 i += len; 1285 sprintf(pg_result, "OK: src6=%s", buf); 1286 return count; 1287 } 1288 if (!strcmp(name, "src_min")) { 1289 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); 1290 if (len < 0) { 1291 return len; 1292 } 1293 if (copy_from_user(buf, &user_buffer[i], len)) 1294 return -EFAULT; 1295 buf[len] = 0; 1296 if (strcmp(buf, pkt_dev->src_min) != 0) { 1297 memset(pkt_dev->src_min, 0, sizeof(pkt_dev->src_min)); 1298 strncpy(pkt_dev->src_min, buf, len); 1299 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 1300 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1301 } 1302 if (debug) 1303 printk("pktgen: src_min set to: %s\n", 1304 pkt_dev->src_min); 1305 i += len; 1306 sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); 1307 return count; 1308 } 1309 if (!strcmp(name, "src_max")) { 1310 len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); 1311 if (len < 0) { 1312 return len; 1313 } 1314 if (copy_from_user(buf, &user_buffer[i], len)) 1315 return -EFAULT; 1316 buf[len] = 0; 1317 if (strcmp(buf, pkt_dev->src_max) != 0) { 1318 memset(pkt_dev->src_max, 0, sizeof(pkt_dev->src_max)); 1319 strncpy(pkt_dev->src_max, buf, len); 1320 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 1321 pkt_dev->cur_saddr = pkt_dev->saddr_max; 1322 } 1323 if (debug) 1324 printk("pktgen: src_max set to: %s\n", 1325 pkt_dev->src_max); 1326 i += len; 1327 sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); 1328 return count; 1329 } 1330 if (!strcmp(name, "dst_mac")) { 1331 char *v = valstr; 1332 unsigned char old_dmac[ETH_ALEN]; 1333 unsigned char *m = pkt_dev->dst_mac; 1334 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN); 1335 1336 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1337 if (len < 0) { 1338 return len; 1339 } 1340 memset(valstr, 0, sizeof(valstr)); 1341 if (copy_from_user(valstr, &user_buffer[i], len)) 1342 return -EFAULT; 1343 i += len; 1344 1345 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) { 1346 if (*v >= '0' && *v <= '9') { 1347 *m *= 16; 1348 *m += *v - '0'; 1349 } 1350 if (*v >= 'A' && *v <= 'F') { 1351 *m *= 16; 1352 *m += *v - 'A' + 10; 1353 } 1354 if (*v >= 'a' && *v <= 'f') { 1355 *m *= 16; 1356 *m += *v - 'a' + 10; 1357 } 1358 if (*v == ':') { 1359 m++; 1360 *m = 0; 1361 } 1362 } 1363 1364 /* Set up Dest MAC */ 1365 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) 1366 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); 1367 1368 sprintf(pg_result, "OK: dstmac"); 1369 return count; 1370 } 1371 if (!strcmp(name, "src_mac")) { 1372 char *v = valstr; 1373 unsigned char *m = pkt_dev->src_mac; 1374 1375 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1376 if (len < 0) { 1377 return len; 1378 } 1379 memset(valstr, 0, sizeof(valstr)); 1380 if (copy_from_user(valstr, &user_buffer[i], len)) 1381 return -EFAULT; 1382 i += len; 1383 1384 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) { 1385 if (*v >= '0' && *v <= '9') { 1386 *m *= 16; 1387 *m += *v - '0'; 1388 } 1389 if (*v >= 'A' && *v <= 'F') { 1390 *m *= 16; 1391 *m += *v - 'A' + 10; 1392 } 1393 if (*v >= 'a' && *v <= 'f') { 1394 *m *= 16; 1395 *m += *v - 'a' + 10; 1396 } 1397 if (*v == ':') { 1398 m++; 1399 *m = 0; 1400 } 1401 } 1402 1403 sprintf(pg_result, "OK: srcmac"); 1404 return 
count; 1405 } 1406 1407 if (!strcmp(name, "clear_counters")) { 1408 pktgen_clear_counters(pkt_dev); 1409 sprintf(pg_result, "OK: Clearing counters.\n"); 1410 return count; 1411 } 1412 1413 if (!strcmp(name, "flows")) { 1414 len = num_arg(&user_buffer[i], 10, &value); 1415 if (len < 0) { 1416 return len; 1417 } 1418 i += len; 1419 if (value > MAX_CFLOWS) 1420 value = MAX_CFLOWS; 1421 1422 pkt_dev->cflows = value; 1423 sprintf(pg_result, "OK: flows=%u", pkt_dev->cflows); 1424 return count; 1425 } 1426 1427 if (!strcmp(name, "flowlen")) { 1428 len = num_arg(&user_buffer[i], 10, &value); 1429 if (len < 0) { 1430 return len; 1431 } 1432 i += len; 1433 pkt_dev->lflow = value; 1434 sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); 1435 return count; 1436 } 1437 1438 if (!strcmp(name, "mpls")) { 1439 unsigned n, offset; 1440 len = get_labels(&user_buffer[i], pkt_dev); 1441 if (len < 0) { return len; } 1442 i += len; 1443 offset = sprintf(pg_result, "OK: mpls="); 1444 for(n = 0; n < pkt_dev->nr_labels; n++) 1445 offset += sprintf(pg_result + offset, 1446 "%08x%s", ntohl(pkt_dev->labels[n]), 1447 n == pkt_dev->nr_labels-1 ? "" : ","); 1448 return count; 1449 } 1450 1451 sprintf(pkt_dev->result, "No such parameter \"%s\"", name); 1452 return -EINVAL; 1453 } 1454 1455 static int pktgen_if_open(struct inode *inode, struct file *file) 1456 { 1457 return single_open(file, pktgen_if_show, PDE(inode)->data); 1458 } 1459 1460 static struct file_operations pktgen_if_fops = { 1461 .owner = THIS_MODULE, 1462 .open = pktgen_if_open, 1463 .read = seq_read, 1464 .llseek = seq_lseek, 1465 .write = pktgen_if_write, 1466 .release = single_release, 1467 }; 1468 1469 static int pktgen_thread_show(struct seq_file *seq, void *v) 1470 { 1471 struct pktgen_thread *t = seq->private; 1472 struct pktgen_dev *pkt_dev; 1473 1474 BUG_ON(!t); 1475 1476 seq_printf(seq, "Name: %s max_before_softirq: %d\n", 1477 t->name, t->max_before_softirq); 1478 1479 seq_printf(seq, "Running: "); 1480 1481 if_lock(t); 1482 list_for_each_entry(pkt_dev, &t->if_list, list) 1483 if (pkt_dev->running) 1484 seq_printf(seq, "%s ", pkt_dev->ifname); 1485 1486 seq_printf(seq, "\nStopped: "); 1487 1488 list_for_each_entry(pkt_dev, &t->if_list, list) 1489 if (!pkt_dev->running) 1490 seq_printf(seq, "%s ", pkt_dev->ifname); 1491 1492 if (t->result[0]) 1493 seq_printf(seq, "\nResult: %s\n", t->result); 1494 else 1495 seq_printf(seq, "\nResult: NA\n"); 1496 1497 if_unlock(t); 1498 1499 return 0; 1500 } 1501 1502 static ssize_t pktgen_thread_write(struct file *file, 1503 const char __user * user_buffer, 1504 size_t count, loff_t * offset) 1505 { 1506 struct seq_file *seq = (struct seq_file *)file->private_data; 1507 struct pktgen_thread *t = seq->private; 1508 int i = 0, max, len, ret; 1509 char name[40]; 1510 char *pg_result; 1511 unsigned long value = 0; 1512 1513 if (count < 1) { 1514 // sprintf(pg_result, "Wrong command format"); 1515 return -EINVAL; 1516 } 1517 1518 max = count - i; 1519 len = count_trail_chars(&user_buffer[i], max); 1520 if (len < 0) 1521 return len; 1522 1523 i += len; 1524 1525 /* Read variable name */ 1526 1527 len = strn_len(&user_buffer[i], sizeof(name) - 1); 1528 if (len < 0) 1529 return len; 1530 1531 memset(name, 0, sizeof(name)); 1532 if (copy_from_user(name, &user_buffer[i], len)) 1533 return -EFAULT; 1534 i += len; 1535 1536 max = count - i; 1537 len = count_trail_chars(&user_buffer[i], max); 1538 if (len < 0) 1539 return len; 1540 1541 i += len; 1542 1543 if (debug) 1544 printk("pktgen: t=%s, count=%lu\n", name, 
(unsigned long)count); 1545 1546 if (!t) { 1547 printk("pktgen: ERROR: No thread\n"); 1548 ret = -EINVAL; 1549 goto out; 1550 } 1551 1552 pg_result = &(t->result[0]); 1553 1554 if (!strcmp(name, "add_device")) { 1555 char f[32]; 1556 memset(f, 0, 32); 1557 len = strn_len(&user_buffer[i], sizeof(f) - 1); 1558 if (len < 0) { 1559 ret = len; 1560 goto out; 1561 } 1562 if (copy_from_user(f, &user_buffer[i], len)) 1563 return -EFAULT; 1564 i += len; 1565 mutex_lock(&pktgen_thread_lock); 1566 pktgen_add_device(t, f); 1567 mutex_unlock(&pktgen_thread_lock); 1568 ret = count; 1569 sprintf(pg_result, "OK: add_device=%s", f); 1570 goto out; 1571 } 1572 1573 if (!strcmp(name, "rem_device_all")) { 1574 mutex_lock(&pktgen_thread_lock); 1575 t->control |= T_REMDEVALL; 1576 mutex_unlock(&pktgen_thread_lock); 1577 schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */ 1578 ret = count; 1579 sprintf(pg_result, "OK: rem_device_all"); 1580 goto out; 1581 } 1582 1583 if (!strcmp(name, "max_before_softirq")) { 1584 len = num_arg(&user_buffer[i], 10, &value); 1585 mutex_lock(&pktgen_thread_lock); 1586 t->max_before_softirq = value; 1587 mutex_unlock(&pktgen_thread_lock); 1588 ret = count; 1589 sprintf(pg_result, "OK: max_before_softirq=%lu", value); 1590 goto out; 1591 } 1592 1593 ret = -EINVAL; 1594 out: 1595 return ret; 1596 } 1597 1598 static int pktgen_thread_open(struct inode *inode, struct file *file) 1599 { 1600 return single_open(file, pktgen_thread_show, PDE(inode)->data); 1601 } 1602 1603 static struct file_operations pktgen_thread_fops = { 1604 .owner = THIS_MODULE, 1605 .open = pktgen_thread_open, 1606 .read = seq_read, 1607 .llseek = seq_lseek, 1608 .write = pktgen_thread_write, 1609 .release = single_release, 1610 }; 1611 1612 /* Think find or remove for NN */ 1613 static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove) 1614 { 1615 struct pktgen_thread *t; 1616 struct pktgen_dev *pkt_dev = NULL; 1617 1618 list_for_each_entry(t, &pktgen_threads, th_list) { 1619 pkt_dev = pktgen_find_dev(t, ifname); 1620 if (pkt_dev) { 1621 if (remove) { 1622 if_lock(t); 1623 pkt_dev->removal_mark = 1; 1624 t->control |= T_REMDEV; 1625 if_unlock(t); 1626 } 1627 break; 1628 } 1629 } 1630 return pkt_dev; 1631 } 1632 1633 /* 1634 * mark a device for removal 1635 */ 1636 static int pktgen_mark_device(const char *ifname) 1637 { 1638 struct pktgen_dev *pkt_dev = NULL; 1639 const int max_tries = 10, msec_per_try = 125; 1640 int i = 0; 1641 int ret = 0; 1642 1643 mutex_lock(&pktgen_thread_lock); 1644 PG_DEBUG(printk("pktgen: pktgen_mark_device marking %s for removal\n", 1645 ifname)); 1646 1647 while (1) { 1648 1649 pkt_dev = __pktgen_NN_threads(ifname, REMOVE); 1650 if (pkt_dev == NULL) 1651 break; /* success */ 1652 1653 mutex_unlock(&pktgen_thread_lock); 1654 PG_DEBUG(printk("pktgen: pktgen_mark_device waiting for %s " 1655 "to disappear....\n", ifname)); 1656 schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); 1657 mutex_lock(&pktgen_thread_lock); 1658 1659 if (++i >= max_tries) { 1660 printk("pktgen_mark_device: timed out after waiting " 1661 "%d msec for device %s to be removed\n", 1662 msec_per_try * i, ifname); 1663 ret = 1; 1664 break; 1665 } 1666 1667 } 1668 1669 mutex_unlock(&pktgen_thread_lock); 1670 1671 return ret; 1672 } 1673 1674 static int pktgen_device_event(struct notifier_block *unused, 1675 unsigned long event, void *ptr) 1676 { 1677 struct net_device *dev = (struct net_device *)(ptr); 1678 1679 /* It is OK that we do not hold the group 
lock right now, 1680 * as we run under the RTNL lock. 1681 */ 1682 1683 switch (event) { 1684 case NETDEV_CHANGEADDR: 1685 case NETDEV_GOING_DOWN: 1686 case NETDEV_DOWN: 1687 case NETDEV_UP: 1688 /* Ignore for now */ 1689 break; 1690 1691 case NETDEV_UNREGISTER: 1692 pktgen_mark_device(dev->name); 1693 break; 1694 }; 1695 1696 return NOTIFY_DONE; 1697 } 1698 1699 /* Associate pktgen_dev with a device. */ 1700 1701 static struct net_device *pktgen_setup_dev(struct pktgen_dev *pkt_dev) 1702 { 1703 struct net_device *odev; 1704 1705 /* Clean old setups */ 1706 1707 if (pkt_dev->odev) { 1708 dev_put(pkt_dev->odev); 1709 pkt_dev->odev = NULL; 1710 } 1711 1712 odev = dev_get_by_name(pkt_dev->ifname); 1713 1714 if (!odev) { 1715 printk("pktgen: no such netdevice: \"%s\"\n", pkt_dev->ifname); 1716 goto out; 1717 } 1718 if (odev->type != ARPHRD_ETHER) { 1719 printk("pktgen: not an ethernet device: \"%s\"\n", 1720 pkt_dev->ifname); 1721 goto out_put; 1722 } 1723 if (!netif_running(odev)) { 1724 printk("pktgen: device is down: \"%s\"\n", pkt_dev->ifname); 1725 goto out_put; 1726 } 1727 pkt_dev->odev = odev; 1728 1729 return pkt_dev->odev; 1730 1731 out_put: 1732 dev_put(odev); 1733 out: 1734 return NULL; 1735 1736 } 1737 1738 /* Read pkt_dev from the interface and set up internal pktgen_dev 1739 * structure to have the right information to create/send packets 1740 */ 1741 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) 1742 { 1743 /* Try once more, just in case it works now. */ 1744 if (!pkt_dev->odev) 1745 pktgen_setup_dev(pkt_dev); 1746 1747 if (!pkt_dev->odev) { 1748 printk("pktgen: ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 1749 sprintf(pkt_dev->result, 1750 "ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 1751 return; 1752 } 1753 1754 /* Default to the interface's mac if not explicitly set. */ 1755 1756 if (is_zero_ether_addr(pkt_dev->src_mac)) 1757 memcpy(&(pkt_dev->hh[6]), pkt_dev->odev->dev_addr, ETH_ALEN); 1758 1759 /* Set up Dest MAC */ 1760 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN); 1761 1762 /* Set up pkt size */ 1763 pkt_dev->cur_pkt_size = pkt_dev->min_pkt_size; 1764 1765 if (pkt_dev->flags & F_IPV6) { 1766 /* 1767 * Skip this automatic address setting until locks or functions 1768 * gets exported 1769 */ 1770 1771 #ifdef NOTNOW 1772 int i, set = 0, err = 1; 1773 struct inet6_dev *idev; 1774 1775 for (i = 0; i < IN6_ADDR_HSIZE; i++) 1776 if (pkt_dev->cur_in6_saddr.s6_addr[i]) { 1777 set = 1; 1778 break; 1779 } 1780 1781 if (!set) { 1782 1783 /* 1784 * Use linklevel address if unconfigured. 
1785 * 1786 * use ipv6_get_lladdr if/when it's get exported 1787 */ 1788 1789 read_lock(&addrconf_lock); 1790 if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) { 1791 struct inet6_ifaddr *ifp; 1792 1793 read_lock_bh(&idev->lock); 1794 for (ifp = idev->addr_list; ifp; 1795 ifp = ifp->if_next) { 1796 if (ifp->scope == IFA_LINK 1797 && !(ifp-> 1798 flags & IFA_F_TENTATIVE)) { 1799 ipv6_addr_copy(&pkt_dev-> 1800 cur_in6_saddr, 1801 &ifp->addr); 1802 err = 0; 1803 break; 1804 } 1805 } 1806 read_unlock_bh(&idev->lock); 1807 } 1808 read_unlock(&addrconf_lock); 1809 if (err) 1810 printk("pktgen: ERROR: IPv6 link address not availble.\n"); 1811 } 1812 #endif 1813 } else { 1814 pkt_dev->saddr_min = 0; 1815 pkt_dev->saddr_max = 0; 1816 if (strlen(pkt_dev->src_min) == 0) { 1817 1818 struct in_device *in_dev; 1819 1820 rcu_read_lock(); 1821 in_dev = __in_dev_get_rcu(pkt_dev->odev); 1822 if (in_dev) { 1823 if (in_dev->ifa_list) { 1824 pkt_dev->saddr_min = 1825 in_dev->ifa_list->ifa_address; 1826 pkt_dev->saddr_max = pkt_dev->saddr_min; 1827 } 1828 } 1829 rcu_read_unlock(); 1830 } else { 1831 pkt_dev->saddr_min = in_aton(pkt_dev->src_min); 1832 pkt_dev->saddr_max = in_aton(pkt_dev->src_max); 1833 } 1834 1835 pkt_dev->daddr_min = in_aton(pkt_dev->dst_min); 1836 pkt_dev->daddr_max = in_aton(pkt_dev->dst_max); 1837 } 1838 /* Initialize current values. */ 1839 pkt_dev->cur_dst_mac_offset = 0; 1840 pkt_dev->cur_src_mac_offset = 0; 1841 pkt_dev->cur_saddr = pkt_dev->saddr_min; 1842 pkt_dev->cur_daddr = pkt_dev->daddr_min; 1843 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min; 1844 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 1845 pkt_dev->nflows = 0; 1846 } 1847 1848 static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) 1849 { 1850 __u64 start; 1851 __u64 now; 1852 1853 start = now = getCurUs(); 1854 printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now)); 1855 while (now < spin_until_us) { 1856 /* TODO: optimize sleeping behavior */ 1857 if (spin_until_us - now > jiffies_to_usecs(1) + 1) 1858 schedule_timeout_interruptible(1); 1859 else if (spin_until_us - now > 100) { 1860 do_softirq(); 1861 if (!pkt_dev->running) 1862 return; 1863 if (need_resched()) 1864 schedule(); 1865 } 1866 1867 now = getCurUs(); 1868 } 1869 1870 pkt_dev->idle_acc += now - start; 1871 } 1872 1873 /* Increment/randomize headers according to flags and current values 1874 * for IP src/dest, UDP src/dst port, MAC-Addr src/dst 1875 */ 1876 static void mod_cur_headers(struct pktgen_dev *pkt_dev) 1877 { 1878 __u32 imn; 1879 __u32 imx; 1880 int flow = 0; 1881 1882 if (pkt_dev->cflows) { 1883 flow = pktgen_random() % pkt_dev->cflows; 1884 1885 if (pkt_dev->flows[flow].count > pkt_dev->lflow) 1886 pkt_dev->flows[flow].count = 0; 1887 } 1888 1889 /* Deal with source MAC */ 1890 if (pkt_dev->src_mac_count > 1) { 1891 __u32 mc; 1892 __u32 tmp; 1893 1894 if (pkt_dev->flags & F_MACSRC_RND) 1895 mc = pktgen_random() % (pkt_dev->src_mac_count); 1896 else { 1897 mc = pkt_dev->cur_src_mac_offset++; 1898 if (pkt_dev->cur_src_mac_offset > 1899 pkt_dev->src_mac_count) 1900 pkt_dev->cur_src_mac_offset = 0; 1901 } 1902 1903 tmp = pkt_dev->src_mac[5] + (mc & 0xFF); 1904 pkt_dev->hh[11] = tmp; 1905 tmp = (pkt_dev->src_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 1906 pkt_dev->hh[10] = tmp; 1907 tmp = (pkt_dev->src_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 1908 pkt_dev->hh[9] = tmp; 1909 tmp = (pkt_dev->src_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 1910 pkt_dev->hh[8] = tmp; 1911 tmp = (pkt_dev->src_mac[1] + (tmp >> 8)); 1912 pkt_dev->hh[7] = 
tmp; 1913 } 1914 1915 /* Deal with Destination MAC */ 1916 if (pkt_dev->dst_mac_count > 1) { 1917 __u32 mc; 1918 __u32 tmp; 1919 1920 if (pkt_dev->flags & F_MACDST_RND) 1921 mc = pktgen_random() % (pkt_dev->dst_mac_count); 1922 1923 else { 1924 mc = pkt_dev->cur_dst_mac_offset++; 1925 if (pkt_dev->cur_dst_mac_offset > 1926 pkt_dev->dst_mac_count) { 1927 pkt_dev->cur_dst_mac_offset = 0; 1928 } 1929 } 1930 1931 tmp = pkt_dev->dst_mac[5] + (mc & 0xFF); 1932 pkt_dev->hh[5] = tmp; 1933 tmp = (pkt_dev->dst_mac[4] + ((mc >> 8) & 0xFF) + (tmp >> 8)); 1934 pkt_dev->hh[4] = tmp; 1935 tmp = (pkt_dev->dst_mac[3] + ((mc >> 16) & 0xFF) + (tmp >> 8)); 1936 pkt_dev->hh[3] = tmp; 1937 tmp = (pkt_dev->dst_mac[2] + ((mc >> 24) & 0xFF) + (tmp >> 8)); 1938 pkt_dev->hh[2] = tmp; 1939 tmp = (pkt_dev->dst_mac[1] + (tmp >> 8)); 1940 pkt_dev->hh[1] = tmp; 1941 } 1942 1943 if (pkt_dev->flags & F_MPLS_RND) { 1944 unsigned i; 1945 for(i = 0; i < pkt_dev->nr_labels; i++) 1946 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 1947 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 1948 (pktgen_random() & 1949 htonl(0x000fffff)); 1950 } 1951 1952 if (pkt_dev->udp_src_min < pkt_dev->udp_src_max) { 1953 if (pkt_dev->flags & F_UDPSRC_RND) 1954 pkt_dev->cur_udp_src = 1955 ((pktgen_random() % 1956 (pkt_dev->udp_src_max - pkt_dev->udp_src_min)) + 1957 pkt_dev->udp_src_min); 1958 1959 else { 1960 pkt_dev->cur_udp_src++; 1961 if (pkt_dev->cur_udp_src >= pkt_dev->udp_src_max) 1962 pkt_dev->cur_udp_src = pkt_dev->udp_src_min; 1963 } 1964 } 1965 1966 if (pkt_dev->udp_dst_min < pkt_dev->udp_dst_max) { 1967 if (pkt_dev->flags & F_UDPDST_RND) { 1968 pkt_dev->cur_udp_dst = 1969 ((pktgen_random() % 1970 (pkt_dev->udp_dst_max - pkt_dev->udp_dst_min)) + 1971 pkt_dev->udp_dst_min); 1972 } else { 1973 pkt_dev->cur_udp_dst++; 1974 if (pkt_dev->cur_udp_dst >= pkt_dev->udp_dst_max) 1975 pkt_dev->cur_udp_dst = pkt_dev->udp_dst_min; 1976 } 1977 } 1978 1979 if (!(pkt_dev->flags & F_IPV6)) { 1980 1981 if ((imn = ntohl(pkt_dev->saddr_min)) < (imx = 1982 ntohl(pkt_dev-> 1983 saddr_max))) { 1984 __u32 t; 1985 if (pkt_dev->flags & F_IPSRC_RND) 1986 t = ((pktgen_random() % (imx - imn)) + imn); 1987 else { 1988 t = ntohl(pkt_dev->cur_saddr); 1989 t++; 1990 if (t > imx) { 1991 t = imn; 1992 } 1993 } 1994 pkt_dev->cur_saddr = htonl(t); 1995 } 1996 1997 if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { 1998 pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; 1999 } else { 2000 2001 if ((imn = ntohl(pkt_dev->daddr_min)) < (imx = 2002 ntohl(pkt_dev-> 2003 daddr_max))) 2004 { 2005 __u32 t; 2006 if (pkt_dev->flags & F_IPDST_RND) { 2007 2008 t = ((pktgen_random() % (imx - imn)) + 2009 imn); 2010 t = htonl(t); 2011 2012 while (LOOPBACK(t) || MULTICAST(t) 2013 || BADCLASS(t) || ZERONET(t) 2014 || LOCAL_MCAST(t)) { 2015 t = ((pktgen_random() % 2016 (imx - imn)) + imn); 2017 t = htonl(t); 2018 } 2019 pkt_dev->cur_daddr = t; 2020 } 2021 2022 else { 2023 t = ntohl(pkt_dev->cur_daddr); 2024 t++; 2025 if (t > imx) { 2026 t = imn; 2027 } 2028 pkt_dev->cur_daddr = htonl(t); 2029 } 2030 } 2031 if (pkt_dev->cflows) { 2032 pkt_dev->flows[flow].cur_daddr = 2033 pkt_dev->cur_daddr; 2034 pkt_dev->nflows++; 2035 } 2036 } 2037 } else { /* IPV6 * */ 2038 2039 if (pkt_dev->min_in6_daddr.s6_addr32[0] == 0 && 2040 pkt_dev->min_in6_daddr.s6_addr32[1] == 0 && 2041 pkt_dev->min_in6_daddr.s6_addr32[2] == 0 && 2042 pkt_dev->min_in6_daddr.s6_addr32[3] == 0) ; 2043 else { 2044 int i; 2045 2046 /* Only random destinations yet */ 2047 2048 for (i = 0; i < 4; i++) { 2049 
pkt_dev->cur_in6_daddr.s6_addr32[i] = 2050 ((pktgen_random() | 2051 pkt_dev->min_in6_daddr.s6_addr32[i]) & 2052 pkt_dev->max_in6_daddr.s6_addr32[i]); 2053 } 2054 } 2055 } 2056 2057 if (pkt_dev->min_pkt_size < pkt_dev->max_pkt_size) { 2058 __u32 t; 2059 if (pkt_dev->flags & F_TXSIZE_RND) { 2060 t = ((pktgen_random() % 2061 (pkt_dev->max_pkt_size - pkt_dev->min_pkt_size)) 2062 + pkt_dev->min_pkt_size); 2063 } else { 2064 t = pkt_dev->cur_pkt_size + 1; 2065 if (t > pkt_dev->max_pkt_size) 2066 t = pkt_dev->min_pkt_size; 2067 } 2068 pkt_dev->cur_pkt_size = t; 2069 } 2070 2071 pkt_dev->flows[flow].count++; 2072 } 2073 2074 static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2075 { 2076 unsigned i; 2077 for(i = 0; i < pkt_dev->nr_labels; i++) { 2078 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2079 } 2080 mpls--; 2081 *mpls |= MPLS_STACK_BOTTOM; 2082 } 2083 2084 static struct sk_buff *fill_packet_ipv4(struct net_device *odev, 2085 struct pktgen_dev *pkt_dev) 2086 { 2087 struct sk_buff *skb = NULL; 2088 __u8 *eth; 2089 struct udphdr *udph; 2090 int datalen, iplen; 2091 struct iphdr *iph; 2092 struct pktgen_hdr *pgh = NULL; 2093 __be16 protocol = __constant_htons(ETH_P_IP); 2094 __be32 *mpls; 2095 2096 if (pkt_dev->nr_labels) 2097 protocol = __constant_htons(ETH_P_MPLS_UC); 2098 2099 /* Update any of the values, used when we're incrementing various 2100 * fields. 2101 */ 2102 mod_cur_headers(pkt_dev); 2103 2104 datalen = (odev->hard_header_len + 16) & ~0xf; 2105 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen + 2106 pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); 2107 if (!skb) { 2108 sprintf(pkt_dev->result, "No memory"); 2109 return NULL; 2110 } 2111 2112 skb_reserve(skb, datalen); 2113 2114 /* Reserve for ethernet and IP header */ 2115 eth = (__u8 *) skb_push(skb, 14); 2116 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2117 if (pkt_dev->nr_labels) 2118 mpls_push(mpls, pkt_dev); 2119 iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)); 2120 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2121 2122 memcpy(eth, pkt_dev->hh, 12); 2123 *(u16 *) & eth[12] = protocol; 2124 2125 /* Eth + IPh + UDPh + mpls */ 2126 datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - 2127 pkt_dev->nr_labels*sizeof(u32); 2128 if (datalen < sizeof(struct pktgen_hdr)) 2129 datalen = sizeof(struct pktgen_hdr); 2130 2131 udph->source = htons(pkt_dev->cur_udp_src); 2132 udph->dest = htons(pkt_dev->cur_udp_dst); 2133 udph->len = htons(datalen + 8); /* DATA + udphdr */ 2134 udph->check = 0; /* No checksum */ 2135 2136 iph->ihl = 5; 2137 iph->version = 4; 2138 iph->ttl = 32; 2139 iph->tos = 0; 2140 iph->protocol = IPPROTO_UDP; /* UDP */ 2141 iph->saddr = pkt_dev->cur_saddr; 2142 iph->daddr = pkt_dev->cur_daddr; 2143 iph->frag_off = 0; 2144 iplen = 20 + 8 + datalen; 2145 iph->tot_len = htons(iplen); 2146 iph->check = 0; 2147 iph->check = ip_fast_csum((void *)iph, iph->ihl); 2148 skb->protocol = protocol; 2149 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); 2150 skb->dev = odev; 2151 skb->pkt_type = PACKET_HOST; 2152 2153 if (pkt_dev->nfrags <= 0) 2154 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2155 else { 2156 int frags = pkt_dev->nfrags; 2157 int i; 2158 2159 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); 2160 2161 if (frags > MAX_SKB_FRAGS) 2162 frags = MAX_SKB_FRAGS; 2163 if (datalen > frags * PAGE_SIZE) { 2164 skb_put(skb, datalen - frags * PAGE_SIZE); 2165 datalen = frags * PAGE_SIZE; 2166 } 2167 2168 i = 0; 2169 while (datalen > 0) { 2170 struct 
page *page = alloc_pages(GFP_KERNEL, 0); 2171 skb_shinfo(skb)->frags[i].page = page; 2172 skb_shinfo(skb)->frags[i].page_offset = 0; 2173 skb_shinfo(skb)->frags[i].size = 2174 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2175 datalen -= skb_shinfo(skb)->frags[i].size; 2176 skb->len += skb_shinfo(skb)->frags[i].size; 2177 skb->data_len += skb_shinfo(skb)->frags[i].size; 2178 i++; 2179 skb_shinfo(skb)->nr_frags = i; 2180 } 2181 2182 while (i < frags) { 2183 int rem; 2184 2185 if (i == 0) 2186 break; 2187 2188 rem = skb_shinfo(skb)->frags[i - 1].size / 2; 2189 if (rem == 0) 2190 break; 2191 2192 skb_shinfo(skb)->frags[i - 1].size -= rem; 2193 2194 skb_shinfo(skb)->frags[i] = 2195 skb_shinfo(skb)->frags[i - 1]; 2196 get_page(skb_shinfo(skb)->frags[i].page); 2197 skb_shinfo(skb)->frags[i].page = 2198 skb_shinfo(skb)->frags[i - 1].page; 2199 skb_shinfo(skb)->frags[i].page_offset += 2200 skb_shinfo(skb)->frags[i - 1].size; 2201 skb_shinfo(skb)->frags[i].size = rem; 2202 i++; 2203 skb_shinfo(skb)->nr_frags = i; 2204 } 2205 } 2206 2207 /* Stamp the time, and sequence number, convert them to network byte order */ 2208 2209 if (pgh) { 2210 struct timeval timestamp; 2211 2212 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2213 pgh->seq_num = htonl(pkt_dev->seq_num); 2214 2215 do_gettimeofday(&timestamp); 2216 pgh->tv_sec = htonl(timestamp.tv_sec); 2217 pgh->tv_usec = htonl(timestamp.tv_usec); 2218 } 2219 pkt_dev->seq_num++; 2220 2221 return skb; 2222 } 2223 2224 /* 2225 * scan_ip6, fmt_ip taken from dietlibc-0.21 2226 * Author Felix von Leitner <felix-dietlibc@fefe.de> 2227 * 2228 * Slightly modified for kernel. 2229 * Should be candidate for net/ipv4/utils.c 2230 * --ro 2231 */ 2232 2233 static unsigned int scan_ip6(const char *s, char ip[16]) 2234 { 2235 unsigned int i; 2236 unsigned int len = 0; 2237 unsigned long u; 2238 char suffix[16]; 2239 unsigned int prefixlen = 0; 2240 unsigned int suffixlen = 0; 2241 __u32 tmp; 2242 2243 for (i = 0; i < 16; i++) 2244 ip[i] = 0; 2245 2246 for (;;) { 2247 if (*s == ':') { 2248 len++; 2249 if (s[1] == ':') { /* Found "::", skip to part 2 */ 2250 s += 2; 2251 len++; 2252 break; 2253 } 2254 s++; 2255 } 2256 { 2257 char *tmp; 2258 u = simple_strtoul(s, &tmp, 16); 2259 i = tmp - s; 2260 } 2261 2262 if (!i) 2263 return 0; 2264 if (prefixlen == 12 && s[i] == '.') { 2265 2266 /* the last 4 bytes may be written as IPv4 address */ 2267 2268 tmp = in_aton(s); 2269 memcpy((struct in_addr *)(ip + 12), &tmp, sizeof(tmp)); 2270 return i + len; 2271 } 2272 ip[prefixlen++] = (u >> 8); 2273 ip[prefixlen++] = (u & 255); 2274 s += i; 2275 len += i; 2276 if (prefixlen == 16) 2277 return len; 2278 } 2279 2280 /* part 2, after "::" */ 2281 for (;;) { 2282 if (*s == ':') { 2283 if (suffixlen == 0) 2284 break; 2285 s++; 2286 len++; 2287 } else if (suffixlen != 0) 2288 break; 2289 { 2290 char *tmp; 2291 u = simple_strtol(s, &tmp, 16); 2292 i = tmp - s; 2293 } 2294 if (!i) { 2295 if (*s) 2296 len--; 2297 break; 2298 } 2299 if (suffixlen + prefixlen <= 12 && s[i] == '.') { 2300 tmp = in_aton(s); 2301 memcpy((struct in_addr *)(suffix + suffixlen), &tmp, 2302 sizeof(tmp)); 2303 suffixlen += 4; 2304 len += strlen(s); 2305 break; 2306 } 2307 suffix[suffixlen++] = (u >> 8); 2308 suffix[suffixlen++] = (u & 255); 2309 s += i; 2310 len += i; 2311 if (prefixlen + suffixlen == 16) 2312 break; 2313 } 2314 for (i = 0; i < suffixlen; i++) 2315 ip[16 - suffixlen + i] = suffix[i]; 2316 return len; 2317 } 2318 2319 static char tohex(char hexdigit) 2320 { 2321 return hexdigit > 9 ?
hexdigit + 'a' - 10 : hexdigit + '0'; 2322 } 2323 2324 static int fmt_xlong(char *s, unsigned int i) 2325 { 2326 char *bak = s; 2327 *s = tohex((i >> 12) & 0xf); 2328 if (s != bak || *s != '0') 2329 ++s; 2330 *s = tohex((i >> 8) & 0xf); 2331 if (s != bak || *s != '0') 2332 ++s; 2333 *s = tohex((i >> 4) & 0xf); 2334 if (s != bak || *s != '0') 2335 ++s; 2336 *s = tohex(i & 0xf); 2337 return s - bak + 1; 2338 } 2339 2340 static unsigned int fmt_ip6(char *s, const char ip[16]) 2341 { 2342 unsigned int len; 2343 unsigned int i; 2344 unsigned int temp; 2345 unsigned int compressing; 2346 int j; 2347 2348 len = 0; 2349 compressing = 0; 2350 for (j = 0; j < 16; j += 2) { 2351 2352 #ifdef V4MAPPEDPREFIX 2353 if (j == 12 && !memcmp(ip, V4mappedprefix, 12)) { 2354 inet_ntoa_r(*(struct in_addr *)(ip + 12), s); 2355 temp = strlen(s); 2356 return len + temp; 2357 } 2358 #endif 2359 temp = ((unsigned long)(unsigned char)ip[j] << 8) + 2360 (unsigned long)(unsigned char)ip[j + 1]; 2361 if (temp == 0) { 2362 if (!compressing) { 2363 compressing = 1; 2364 if (j == 0) { 2365 *s++ = ':'; 2366 ++len; 2367 } 2368 } 2369 } else { 2370 if (compressing) { 2371 compressing = 0; 2372 *s++ = ':'; 2373 ++len; 2374 } 2375 i = fmt_xlong(s, temp); 2376 len += i; 2377 s += i; 2378 if (j < 14) { 2379 *s++ = ':'; 2380 ++len; 2381 } 2382 } 2383 } 2384 if (compressing) { 2385 *s++ = ':'; 2386 ++len; 2387 } 2388 *s = 0; 2389 return len; 2390 } 2391 2392 static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2393 struct pktgen_dev *pkt_dev) 2394 { 2395 struct sk_buff *skb = NULL; 2396 __u8 *eth; 2397 struct udphdr *udph; 2398 int datalen; 2399 struct ipv6hdr *iph; 2400 struct pktgen_hdr *pgh = NULL; 2401 __be16 protocol = __constant_htons(ETH_P_IPV6); 2402 __be32 *mpls; 2403 2404 if (pkt_dev->nr_labels) 2405 protocol = __constant_htons(ETH_P_MPLS_UC); 2406 2407 /* Update any of the values, used when we're incrementing various 2408 * fields. 
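* (addresses, UDP ports, MPLS labels and the packet size are stepped or randomized here according to the configured ranges and the *_RND flags).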
2409 */ 2410 mod_cur_headers(pkt_dev); 2411 2412 skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 + 2413 pkt_dev->nr_labels*sizeof(u32), GFP_ATOMIC); 2414 if (!skb) { 2415 sprintf(pkt_dev->result, "No memory"); 2416 return NULL; 2417 } 2418 2419 skb_reserve(skb, 16); 2420 2421 /* Reserve for ethernet and IP header */ 2422 eth = (__u8 *) skb_push(skb, 14); 2423 mpls = (__be32 *)skb_put(skb, pkt_dev->nr_labels*sizeof(__u32)); 2424 if (pkt_dev->nr_labels) 2425 mpls_push(mpls, pkt_dev); 2426 iph = (struct ipv6hdr *)skb_put(skb, sizeof(struct ipv6hdr)); 2427 udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); 2428 2429 memcpy(eth, pkt_dev->hh, 12); 2430 *(u16 *) & eth[12] = protocol; 2431 2432 /* Eth + IPh + UDPh + mpls */ 2433 datalen = pkt_dev->cur_pkt_size - 14 - 2434 sizeof(struct ipv6hdr) - sizeof(struct udphdr) - 2435 pkt_dev->nr_labels*sizeof(u32); 2436 2437 if (datalen < sizeof(struct pktgen_hdr)) { 2438 datalen = sizeof(struct pktgen_hdr); 2439 if (net_ratelimit()) 2440 printk(KERN_INFO "pktgen: increased datalen to %d\n", 2441 datalen); 2442 } 2443 2444 udph->source = htons(pkt_dev->cur_udp_src); 2445 udph->dest = htons(pkt_dev->cur_udp_dst); 2446 udph->len = htons(datalen + sizeof(struct udphdr)); 2447 udph->check = 0; /* No checksum */ 2448 2449 *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ 2450 2451 iph->hop_limit = 32; 2452 2453 iph->payload_len = htons(sizeof(struct udphdr) + datalen); 2454 iph->nexthdr = IPPROTO_UDP; 2455 2456 ipv6_addr_copy(&iph->daddr, &pkt_dev->cur_in6_daddr); 2457 ipv6_addr_copy(&iph->saddr, &pkt_dev->cur_in6_saddr); 2458 2459 skb->mac.raw = ((u8 *) iph) - 14 - pkt_dev->nr_labels*sizeof(u32); 2460 skb->protocol = protocol; 2461 skb->dev = odev; 2462 skb->pkt_type = PACKET_HOST; 2463 2464 if (pkt_dev->nfrags <= 0) 2465 pgh = (struct pktgen_hdr *)skb_put(skb, datalen); 2466 else { 2467 int frags = pkt_dev->nfrags; 2468 int i; 2469 2470 pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8); 2471 2472 if (frags > MAX_SKB_FRAGS) 2473 frags = MAX_SKB_FRAGS; 2474 if (datalen > frags * PAGE_SIZE) { 2475 skb_put(skb, datalen - frags * PAGE_SIZE); 2476 datalen = frags * PAGE_SIZE; 2477 } 2478 2479 i = 0; 2480 while (datalen > 0) { 2481 struct page *page = alloc_pages(GFP_KERNEL, 0); 2482 skb_shinfo(skb)->frags[i].page = page; 2483 skb_shinfo(skb)->frags[i].page_offset = 0; 2484 skb_shinfo(skb)->frags[i].size = 2485 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2486 datalen -= skb_shinfo(skb)->frags[i].size; 2487 skb->len += skb_shinfo(skb)->frags[i].size; 2488 skb->data_len += skb_shinfo(skb)->frags[i].size; 2489 i++; 2490 skb_shinfo(skb)->nr_frags = i; 2491 } 2492 2493 while (i < frags) { 2494 int rem; 2495 2496 if (i == 0) 2497 break; 2498 2499 rem = skb_shinfo(skb)->frags[i - 1].size / 2; 2500 if (rem == 0) 2501 break; 2502 2503 skb_shinfo(skb)->frags[i - 1].size -= rem; 2504 2505 skb_shinfo(skb)->frags[i] = 2506 skb_shinfo(skb)->frags[i - 1]; 2507 get_page(skb_shinfo(skb)->frags[i].page); 2508 skb_shinfo(skb)->frags[i].page = 2509 skb_shinfo(skb)->frags[i - 1].page; 2510 skb_shinfo(skb)->frags[i].page_offset += 2511 skb_shinfo(skb)->frags[i - 1].size; 2512 skb_shinfo(skb)->frags[i].size = rem; 2513 i++; 2514 skb_shinfo(skb)->nr_frags = i; 2515 } 2516 } 2517 2518 /* Stamp the time, and sequence number, convert them to network byte order */ 2519 /* should we update cloned packets too ?
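* As written, the stamp is taken only when a new skb is built in fill_packet(); with clone_skb > 0 the same timestamp is re-sent until the clone budget in pktgen_xmit() is used up.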
*/ 2520 if (pgh) { 2521 struct timeval timestamp; 2522 2523 pgh->pgh_magic = htonl(PKTGEN_MAGIC); 2524 pgh->seq_num = htonl(pkt_dev->seq_num); 2525 2526 do_gettimeofday(&timestamp); 2527 pgh->tv_sec = htonl(timestamp.tv_sec); 2528 pgh->tv_usec = htonl(timestamp.tv_usec); 2529 } 2530 pkt_dev->seq_num++; 2531 2532 return skb; 2533 } 2534 2535 static inline struct sk_buff *fill_packet(struct net_device *odev, 2536 struct pktgen_dev *pkt_dev) 2537 { 2538 if (pkt_dev->flags & F_IPV6) 2539 return fill_packet_ipv6(odev, pkt_dev); 2540 else 2541 return fill_packet_ipv4(odev, pkt_dev); 2542 } 2543 2544 static void pktgen_clear_counters(struct pktgen_dev *pkt_dev) 2545 { 2546 pkt_dev->seq_num = 1; 2547 pkt_dev->idle_acc = 0; 2548 pkt_dev->sofar = 0; 2549 pkt_dev->tx_bytes = 0; 2550 pkt_dev->errors = 0; 2551 } 2552 2553 /* Set up structure for sending pkts, clear counters */ 2554 2555 static void pktgen_run(struct pktgen_thread *t) 2556 { 2557 struct pktgen_dev *pkt_dev; 2558 int started = 0; 2559 2560 PG_DEBUG(printk("pktgen: entering pktgen_run. %p\n", t)); 2561 2562 if_lock(t); 2563 list_for_each_entry(pkt_dev, &t->if_list, list) { 2564 2565 /* 2566 * setup odev and create initial packet. 2567 */ 2568 pktgen_setup_inject(pkt_dev); 2569 2570 if (pkt_dev->odev) { 2571 pktgen_clear_counters(pkt_dev); 2572 pkt_dev->running = 1; /* Cranke yeself! */ 2573 pkt_dev->skb = NULL; 2574 pkt_dev->started_at = getCurUs(); 2575 pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */ 2576 pkt_dev->next_tx_ns = 0; 2577 2578 strcpy(pkt_dev->result, "Starting"); 2579 started++; 2580 } else 2581 strcpy(pkt_dev->result, "Error starting"); 2582 } 2583 if_unlock(t); 2584 if (started) 2585 t->control &= ~(T_STOP); 2586 } 2587 2588 static void pktgen_stop_all_threads_ifs(void) 2589 { 2590 struct pktgen_thread *t; 2591 2592 PG_DEBUG(printk("pktgen: entering pktgen_stop_all_threads_ifs.\n")); 2593 2594 mutex_lock(&pktgen_thread_lock); 2595 2596 list_for_each_entry(t, &pktgen_threads, th_list) 2597 t->control |= T_STOP; 2598 2599 mutex_unlock(&pktgen_thread_lock); 2600 } 2601 2602 static int thread_is_running(struct pktgen_thread *t) 2603 { 2604 struct pktgen_dev *pkt_dev; 2605 int res = 0; 2606 2607 list_for_each_entry(pkt_dev, &t->if_list, list) 2608 if (pkt_dev->running) { 2609 res = 1; 2610 break; 2611 } 2612 return res; 2613 } 2614 2615 static int pktgen_wait_thread_run(struct pktgen_thread *t) 2616 { 2617 if_lock(t); 2618 2619 while (thread_is_running(t)) { 2620 2621 if_unlock(t); 2622 2623 msleep_interruptible(100); 2624 2625 if (signal_pending(current)) 2626 goto signal; 2627 if_lock(t); 2628 } 2629 if_unlock(t); 2630 return 1; 2631 signal: 2632 return 0; 2633 } 2634 2635 static int pktgen_wait_all_threads_run(void) 2636 { 2637 struct pktgen_thread *t; 2638 int sig = 1; 2639 2640 mutex_lock(&pktgen_thread_lock); 2641 2642 list_for_each_entry(t, &pktgen_threads, th_list) { 2643 sig = pktgen_wait_thread_run(t); 2644 if (sig == 0) 2645 break; 2646 } 2647 2648 if (sig == 0) 2649 list_for_each_entry(t, &pktgen_threads, th_list) 2650 t->control |= (T_STOP); 2651 2652 mutex_unlock(&pktgen_thread_lock); 2653 return sig; 2654 } 2655 2656 static void pktgen_run_all_threads(void) 2657 { 2658 struct pktgen_thread *t; 2659 2660 PG_DEBUG(printk("pktgen: entering pktgen_run_all_threads.\n")); 2661 2662 mutex_lock(&pktgen_thread_lock); 2663 2664 list_for_each_entry(t, &pktgen_threads, th_list) 2665 t->control |= (T_RUN); 2666 2667 mutex_unlock(&pktgen_thread_lock); 2668 2669 schedule_timeout_interruptible(msecs_to_jiffies(125)); /*
Propagate thread->control */ 2670 2671 pktgen_wait_all_threads_run(); 2672 } 2673 2674 static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) 2675 { 2676 __u64 total_us, bps, mbps, pps, idle; 2677 char *p = pkt_dev->result; 2678 2679 total_us = pkt_dev->stopped_at - pkt_dev->started_at; 2680 2681 idle = pkt_dev->idle_acc; 2682 2683 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", 2684 (unsigned long long)total_us, 2685 (unsigned long long)(total_us - idle), 2686 (unsigned long long)idle, 2687 (unsigned long long)pkt_dev->sofar, 2688 pkt_dev->cur_pkt_size, nr_frags); 2689 2690 pps = pkt_dev->sofar * USEC_PER_SEC; 2691 2692 while ((total_us >> 32) != 0) { 2693 pps >>= 1; 2694 total_us >>= 1; 2695 } 2696 2697 do_div(pps, total_us); 2698 2699 bps = pps * 8 * pkt_dev->cur_pkt_size; 2700 2701 mbps = bps; 2702 do_div(mbps, 1000000); 2703 p += sprintf(p, " %llupps %lluMb/sec (%llubps) errors: %llu", 2704 (unsigned long long)pps, 2705 (unsigned long long)mbps, 2706 (unsigned long long)bps, 2707 (unsigned long long)pkt_dev->errors); 2708 } 2709 2710 /* Set stopped-at timer, remove from running list, do counters & statistics */ 2711 2712 static int pktgen_stop_device(struct pktgen_dev *pkt_dev) 2713 { 2714 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; 2715 2716 if (!pkt_dev->running) { 2717 printk("pktgen: interface: %s is already stopped\n", 2718 pkt_dev->ifname); 2719 return -EINVAL; 2720 } 2721 2722 pkt_dev->stopped_at = getCurUs(); 2723 pkt_dev->running = 0; 2724 2725 show_results(pkt_dev, nr_frags); 2726 2727 return 0; 2728 } 2729 2730 static struct pktgen_dev *next_to_run(struct pktgen_thread *t) 2731 { 2732 struct pktgen_dev *pkt_dev, *best = NULL; 2733 2734 if_lock(t); 2735 2736 list_for_each_entry(pkt_dev, &t->if_list, list) { 2737 if (!pkt_dev->running) 2738 continue; 2739 if (best == NULL) 2740 best = pkt_dev; 2741 else if (pkt_dev->next_tx_us < best->next_tx_us) 2742 best = pkt_dev; 2743 } 2744 if_unlock(t); 2745 return best; 2746 } 2747 2748 static void pktgen_stop(struct pktgen_thread *t) 2749 { 2750 struct pktgen_dev *pkt_dev; 2751 2752 PG_DEBUG(printk("pktgen: entering pktgen_stop\n")); 2753 2754 if_lock(t); 2755 2756 list_for_each_entry(pkt_dev, &t->if_list, list) { 2757 pktgen_stop_device(pkt_dev); 2758 if (pkt_dev->skb) 2759 kfree_skb(pkt_dev->skb); 2760 2761 pkt_dev->skb = NULL; 2762 } 2763 2764 if_unlock(t); 2765 } 2766 2767 /* 2768 * one of our devices needs to be removed - find it 2769 * and remove it 2770 */ 2771 static void pktgen_rem_one_if(struct pktgen_thread *t) 2772 { 2773 struct list_head *q, *n; 2774 struct pktgen_dev *cur; 2775 2776 PG_DEBUG(printk("pktgen: entering pktgen_rem_one_if\n")); 2777 2778 if_lock(t); 2779 2780 list_for_each_safe(q, n, &t->if_list) { 2781 cur = list_entry(q, struct pktgen_dev, list); 2782 2783 if (!cur->removal_mark) 2784 continue; 2785 2786 if (cur->skb) 2787 kfree_skb(cur->skb); 2788 cur->skb = NULL; 2789 2790 pktgen_remove_device(t, cur); 2791 2792 break; 2793 } 2794 2795 if_unlock(t); 2796 } 2797 2798 static void pktgen_rem_all_ifs(struct pktgen_thread *t) 2799 { 2800 struct list_head *q, *n; 2801 struct pktgen_dev *cur; 2802 2803 /* Remove all devices, free mem */ 2804 2805 PG_DEBUG(printk("pktgen: entering pktgen_rem_all_ifs\n")); 2806 if_lock(t); 2807 2808 list_for_each_safe(q, n, &t->if_list) { 2809 cur = list_entry(q, struct pktgen_dev, list); 2810 2811 if (cur->skb) 2812 kfree_skb(cur->skb); 2813 cur->skb = NULL; 2814 2815 pktgen_remove_device(t, cur); 2816 } 2817 2818 
if_unlock(t); 2819 } 2820 2821 static void pktgen_rem_thread(struct pktgen_thread *t) 2822 { 2823 /* Remove from the thread list */ 2824 2825 remove_proc_entry(t->name, pg_proc_dir); 2826 2827 mutex_lock(&pktgen_thread_lock); 2828 2829 list_del(&t->th_list); 2830 2831 mutex_unlock(&pktgen_thread_lock); 2832 } 2833 2834 static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev) 2835 { 2836 struct net_device *odev = NULL; 2837 __u64 idle_start = 0; 2838 int ret; 2839 2840 odev = pkt_dev->odev; 2841 2842 if (pkt_dev->delay_us || pkt_dev->delay_ns) { 2843 u64 now; 2844 2845 now = getCurUs(); 2846 if (now < pkt_dev->next_tx_us) 2847 spin(pkt_dev, pkt_dev->next_tx_us); 2848 2849 /* This is max DELAY, this has special meaning of 2850 * "never transmit" 2851 */ 2852 if (pkt_dev->delay_us == 0x7FFFFFFF) { 2853 pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us; 2854 pkt_dev->next_tx_ns = pkt_dev->delay_ns; 2855 goto out; 2856 } 2857 } 2858 2859 if (netif_queue_stopped(odev) || need_resched()) { 2860 idle_start = getCurUs(); 2861 2862 if (!netif_running(odev)) { 2863 pktgen_stop_device(pkt_dev); 2864 if (pkt_dev->skb) 2865 kfree_skb(pkt_dev->skb); 2866 pkt_dev->skb = NULL; 2867 goto out; 2868 } 2869 if (need_resched()) 2870 schedule(); 2871 2872 pkt_dev->idle_acc += getCurUs() - idle_start; 2873 2874 if (netif_queue_stopped(odev)) { 2875 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 2876 pkt_dev->next_tx_ns = 0; 2877 goto out; /* Try the next interface */ 2878 } 2879 } 2880 2881 if (pkt_dev->last_ok || !pkt_dev->skb) { 2882 if ((++pkt_dev->clone_count >= pkt_dev->clone_skb) 2883 || (!pkt_dev->skb)) { 2884 /* build a new pkt */ 2885 if (pkt_dev->skb) 2886 kfree_skb(pkt_dev->skb); 2887 2888 pkt_dev->skb = fill_packet(odev, pkt_dev); 2889 if (pkt_dev->skb == NULL) { 2890 printk("pktgen: ERROR: couldn't allocate skb in fill_packet.\n"); 2891 schedule(); 2892 pkt_dev->clone_count--; /* back out increment, OOM */ 2893 goto out; 2894 } 2895 pkt_dev->allocated_skbs++; 2896 pkt_dev->clone_count = 0; /* reset counter */ 2897 } 2898 } 2899 2900 netif_tx_lock_bh(odev); 2901 if (!netif_queue_stopped(odev)) { 2902 2903 atomic_inc(&(pkt_dev->skb->users)); 2904 retry_now: 2905 ret = odev->hard_start_xmit(pkt_dev->skb, odev); 2906 if (likely(ret == NETDEV_TX_OK)) { 2907 pkt_dev->last_ok = 1; 2908 pkt_dev->sofar++; 2909 pkt_dev->seq_num++; 2910 pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; 2911 2912 } else if (ret == NETDEV_TX_LOCKED 2913 && (odev->features & NETIF_F_LLTX)) { 2914 cpu_relax(); 2915 goto retry_now; 2916 } else { /* Retry it next time */ 2917 2918 atomic_dec(&(pkt_dev->skb->users)); 2919 2920 if (debug && net_ratelimit()) 2921 printk(KERN_INFO "pktgen: Hard xmit error\n"); 2922 2923 pkt_dev->errors++; 2924 pkt_dev->last_ok = 0; 2925 } 2926 2927 pkt_dev->next_tx_us = getCurUs(); 2928 pkt_dev->next_tx_ns = 0; 2929 2930 pkt_dev->next_tx_us += pkt_dev->delay_us; 2931 pkt_dev->next_tx_ns += pkt_dev->delay_ns; 2932 2933 if (pkt_dev->next_tx_ns > 1000) { 2934 pkt_dev->next_tx_us++; 2935 pkt_dev->next_tx_ns -= 1000; 2936 } 2937 } 2938 2939 else { /* Retry it next time */ 2940 pkt_dev->last_ok = 0; 2941 pkt_dev->next_tx_us = getCurUs(); /* TODO */ 2942 pkt_dev->next_tx_ns = 0; 2943 } 2944 2945 netif_tx_unlock_bh(odev); 2946 2947 /* If pkt_dev->count is zero, then run forever */ 2948 if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { 2949 if (atomic_read(&(pkt_dev->skb->users)) != 1) { 2950 idle_start = getCurUs(); 2951 while (atomic_read(&(pkt_dev->skb->users)) != 1) { 2952 if 
(signal_pending(current)) { 2953 break; 2954 } 2955 schedule(); 2956 } 2957 pkt_dev->idle_acc += getCurUs() - idle_start; 2958 } 2959 2960 /* Done with this */ 2961 pktgen_stop_device(pkt_dev); 2962 if (pkt_dev->skb) 2963 kfree_skb(pkt_dev->skb); 2964 pkt_dev->skb = NULL; 2965 } 2966 out:; 2967 } 2968 2969 /* 2970 * Main loop of the thread goes here 2971 */ 2972 2973 static void pktgen_thread_worker(struct pktgen_thread *t) 2974 { 2975 DEFINE_WAIT(wait); 2976 struct pktgen_dev *pkt_dev = NULL; 2977 int cpu = t->cpu; 2978 sigset_t tmpsig; 2979 u32 max_before_softirq; 2980 u32 tx_since_softirq = 0; 2981 2982 daemonize("pktgen/%d", cpu); 2983 2984 /* Block all signals except SIGKILL, SIGSTOP and SIGTERM */ 2985 2986 spin_lock_irq(&current->sighand->siglock); 2987 tmpsig = current->blocked; 2988 siginitsetinv(&current->blocked, 2989 sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGTERM)); 2990 2991 recalc_sigpending(); 2992 spin_unlock_irq(&current->sighand->siglock); 2993 2994 /* Migrate to the right CPU */ 2995 set_cpus_allowed(current, cpumask_of_cpu(cpu)); 2996 if (smp_processor_id() != cpu) 2997 BUG(); 2998 2999 init_waitqueue_head(&t->queue); 3000 3001 t->control &= ~(T_TERMINATE); 3002 t->control &= ~(T_RUN); 3003 t->control &= ~(T_STOP); 3004 t->control &= ~(T_REMDEVALL); 3005 t->control &= ~(T_REMDEV); 3006 3007 t->pid = current->pid; 3008 3009 PG_DEBUG(printk("pktgen: starting pktgen/%d: pid=%d\n", cpu, current->pid)); 3010 3011 max_before_softirq = t->max_before_softirq; 3012 3013 __set_current_state(TASK_INTERRUPTIBLE); 3014 mb(); 3015 3016 while (1) { 3017 3018 __set_current_state(TASK_RUNNING); 3019 3020 /* 3021 * Get next dev to xmit -- if any. 3022 */ 3023 3024 pkt_dev = next_to_run(t); 3025 3026 if (pkt_dev) { 3027 3028 pktgen_xmit(pkt_dev); 3029 3030 /* 3031 * We like to stay RUNNING but must also give 3032 * others fair share. 3033 */ 3034 3035 tx_since_softirq += pkt_dev->last_ok; 3036 3037 if (tx_since_softirq > max_before_softirq) { 3038 if (local_softirq_pending()) 3039 do_softirq(); 3040 tx_since_softirq = 0; 3041 } 3042 } else { 3043 prepare_to_wait(&(t->queue), &wait, TASK_INTERRUPTIBLE); 3044 schedule_timeout(HZ / 10); 3045 finish_wait(&(t->queue), &wait); 3046 } 3047 3048 /* 3049 * Back from sleep, either due to the timeout or signal. 3050 * We check if we have any "posted" work for us.
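* ("posted" work is any of the T_STOP, T_RUN, T_REMDEVALL, T_REMDEV or T_TERMINATE bits set in t->control, handled below).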
3051 */ 3052 3053 if (t->control & T_TERMINATE || signal_pending(current)) 3054 /* we received a request to terminate ourself */ 3055 break; 3056 3057 if (t->control & T_STOP) { 3058 pktgen_stop(t); 3059 t->control &= ~(T_STOP); 3060 } 3061 3062 if (t->control & T_RUN) { 3063 pktgen_run(t); 3064 t->control &= ~(T_RUN); 3065 } 3066 3067 if (t->control & T_REMDEVALL) { 3068 pktgen_rem_all_ifs(t); 3069 t->control &= ~(T_REMDEVALL); 3070 } 3071 3072 if (t->control & T_REMDEV) { 3073 pktgen_rem_one_if(t); 3074 t->control &= ~(T_REMDEV); 3075 } 3076 3077 if (need_resched()) 3078 schedule(); 3079 } 3080 3081 PG_DEBUG(printk("pktgen: %s stopping all device\n", t->name)); 3082 pktgen_stop(t); 3083 3084 PG_DEBUG(printk("pktgen: %s removing all device\n", t->name)); 3085 pktgen_rem_all_ifs(t); 3086 3087 PG_DEBUG(printk("pktgen: %s removing thread.\n", t->name)); 3088 pktgen_rem_thread(t); 3089 3090 t->removed = 1; 3091 } 3092 3093 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, 3094 const char *ifname) 3095 { 3096 struct pktgen_dev *p, *pkt_dev = NULL; 3097 if_lock(t); 3098 3099 list_for_each_entry(p, &t->if_list, list) 3100 if (strncmp(p->ifname, ifname, IFNAMSIZ) == 0) { 3101 pkt_dev = p; 3102 break; 3103 } 3104 3105 if_unlock(t); 3106 PG_DEBUG(printk("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev)); 3107 return pkt_dev; 3108 } 3109 3110 /* 3111 * Adds a dev at front of if_list. 3112 */ 3113 3114 static int add_dev_to_thread(struct pktgen_thread *t, 3115 struct pktgen_dev *pkt_dev) 3116 { 3117 int rv = 0; 3118 3119 if_lock(t); 3120 3121 if (pkt_dev->pg_thread) { 3122 printk("pktgen: ERROR: already assigned to a thread.\n"); 3123 rv = -EBUSY; 3124 goto out; 3125 } 3126 3127 list_add(&pkt_dev->list, &t->if_list); 3128 pkt_dev->pg_thread = t; 3129 pkt_dev->running = 0; 3130 3131 out: 3132 if_unlock(t); 3133 return rv; 3134 } 3135 3136 /* Called under thread lock */ 3137 3138 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) 3139 { 3140 struct pktgen_dev *pkt_dev; 3141 struct proc_dir_entry *pe; 3142 3143 /* We don't allow a device to be on several threads */ 3144 3145 pkt_dev = __pktgen_NN_threads(ifname, FIND); 3146 if (pkt_dev) { 3147 printk("pktgen: ERROR: interface already used.\n"); 3148 return -EBUSY; 3149 } 3150 3151 pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); 3152 if (!pkt_dev) 3153 return -ENOMEM; 3154 3155 pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state)); 3156 if (pkt_dev->flows == NULL) { 3157 kfree(pkt_dev); 3158 return -ENOMEM; 3159 } 3160 memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state)); 3161 3162 pkt_dev->removal_mark = 0; 3163 pkt_dev->min_pkt_size = ETH_ZLEN; 3164 pkt_dev->max_pkt_size = ETH_ZLEN; 3165 pkt_dev->nfrags = 0; 3166 pkt_dev->clone_skb = pg_clone_skb_d; 3167 pkt_dev->delay_us = pg_delay_d / 1000; 3168 pkt_dev->delay_ns = pg_delay_d % 1000; 3169 pkt_dev->count = pg_count_d; 3170 pkt_dev->sofar = 0; 3171 pkt_dev->udp_src_min = 9; /* sink port */ 3172 pkt_dev->udp_src_max = 9; 3173 pkt_dev->udp_dst_min = 9; 3174 pkt_dev->udp_dst_max = 9; 3175 3176 strncpy(pkt_dev->ifname, ifname, IFNAMSIZ); 3177 3178 if (!pktgen_setup_dev(pkt_dev)) { 3179 printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); 3180 if (pkt_dev->flows) 3181 vfree(pkt_dev->flows); 3182 kfree(pkt_dev); 3183 return -ENODEV; 3184 } 3185 3186 pe = create_proc_entry(ifname, 0600, pg_proc_dir); 3187 if (!pe) { 3188 printk("pktgen: cannot create %s/%s procfs entry.\n", 3189 PG_PROC_DIR, ifname); 3190 if (pkt_dev->flows) 3191 
vfree(pkt_dev->flows); 3192 kfree(pkt_dev); 3193 return -EINVAL; 3194 } 3195 pe->proc_fops = &pktgen_if_fops; 3196 pe->data = pkt_dev; 3197 3198 return add_dev_to_thread(t, pkt_dev); 3199 } 3200 3201 static struct pktgen_thread *__init pktgen_find_thread(const char *name) 3202 { 3203 struct pktgen_thread *t; 3204 3205 mutex_lock(&pktgen_thread_lock); 3206 3207 list_for_each_entry(t, &pktgen_threads, th_list) 3208 if (strcmp(t->name, name) == 0) { 3209 mutex_unlock(&pktgen_thread_lock); 3210 return t; 3211 } 3212 3213 mutex_unlock(&pktgen_thread_lock); 3214 return NULL; 3215 } 3216 3217 static int __init pktgen_create_thread(const char *name, int cpu) 3218 { 3219 int err; 3220 struct pktgen_thread *t = NULL; 3221 struct proc_dir_entry *pe; 3222 3223 if (strlen(name) > 31) { 3224 printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); 3225 return -EINVAL; 3226 } 3227 3228 if (pktgen_find_thread(name)) { 3229 printk("pktgen: ERROR: thread: %s already exists\n", name); 3230 return -EINVAL; 3231 } 3232 3233 t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); 3234 if (!t) { 3235 printk("pktgen: ERROR: out of memory, can't create new thread.\n"); 3236 return -ENOMEM; 3237 } 3238 3239 strcpy(t->name, name); 3240 spin_lock_init(&t->if_lock); 3241 t->cpu = cpu; 3242 3243 pe = create_proc_entry(t->name, 0600, pg_proc_dir); 3244 if (!pe) { 3245 printk("pktgen: cannot create %s/%s procfs entry.\n", 3246 PG_PROC_DIR, t->name); 3247 kfree(t); 3248 return -EINVAL; 3249 } 3250 3251 pe->proc_fops = &pktgen_thread_fops; 3252 pe->data = t; 3253 3254 INIT_LIST_HEAD(&t->if_list); 3255 3256 list_add_tail(&t->th_list, &pktgen_threads); 3257 3258 t->removed = 0; 3259 3260 err = kernel_thread((void *)pktgen_thread_worker, (void *)t, 3261 CLONE_FS | CLONE_FILES | CLONE_SIGHAND); 3262 if (err < 0) { 3263 printk("pktgen: kernel_thread() failed for cpu %d\n", t->cpu); 3264 remove_proc_entry(t->name, pg_proc_dir); 3265 list_del(&t->th_list); 3266 kfree(t); 3267 return err; 3268 } 3269 3270 return 0; 3271 } 3272 3273 /* 3274 * Removes a device from the thread if_list. 
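* The walk below uses list_for_each_safe() so the matching entry can be unlinked while iterating.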
3275 */ 3276 static void _rem_dev_from_if_list(struct pktgen_thread *t, 3277 struct pktgen_dev *pkt_dev) 3278 { 3279 struct list_head *q, *n; 3280 struct pktgen_dev *p; 3281 3282 list_for_each_safe(q, n, &t->if_list) { 3283 p = list_entry(q, struct pktgen_dev, list); 3284 if (p == pkt_dev) 3285 list_del(&p->list); 3286 } 3287 } 3288 3289 static int pktgen_remove_device(struct pktgen_thread *t, 3290 struct pktgen_dev *pkt_dev) 3291 { 3292 3293 PG_DEBUG(printk("pktgen: remove_device pkt_dev=%p\n", pkt_dev)); 3294 3295 if (pkt_dev->running) { 3296 printk("pktgen: WARNING: trying to remove a running interface, stopping it now.\n"); 3297 pktgen_stop_device(pkt_dev); 3298 } 3299 3300 /* Dis-associate from the interface */ 3301 3302 if (pkt_dev->odev) { 3303 dev_put(pkt_dev->odev); 3304 pkt_dev->odev = NULL; 3305 } 3306 3307 /* And update the thread if_list */ 3308 3309 _rem_dev_from_if_list(t, pkt_dev); 3310 3311 /* Clean up proc file system */ 3312 3313 remove_proc_entry(pkt_dev->ifname, pg_proc_dir); 3314 3315 if (pkt_dev->flows) 3316 vfree(pkt_dev->flows); 3317 kfree(pkt_dev); 3318 return 0; 3319 } 3320 3321 static int __init pg_init(void) 3322 { 3323 int cpu; 3324 struct proc_dir_entry *pe; 3325 3326 printk(version); 3327 3328 pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net); 3329 if (!pg_proc_dir) 3330 return -ENODEV; 3331 pg_proc_dir->owner = THIS_MODULE; 3332 3333 pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir); 3334 if (pe == NULL) { 3335 printk("pktgen: ERROR: cannot create %s procfs entry.\n", 3336 PGCTRL); 3337 proc_net_remove(PG_PROC_DIR); 3338 return -EINVAL; 3339 } 3340 3341 pe->proc_fops = &pktgen_fops; 3342 pe->data = NULL; 3343 3344 /* Register us to receive netdevice events */ 3345 register_netdevice_notifier(&pktgen_notifier_block); 3346 3347 for_each_online_cpu(cpu) { 3348 int err; 3349 char buf[30]; 3350 3351 sprintf(buf, "kpktgend_%i", cpu); 3352 err = pktgen_create_thread(buf, cpu); 3353 if (err) 3354 printk("pktgen: WARNING: Cannot create thread for cpu %d (%d)\n", 3355 cpu, err); 3356 } 3357 3358 if (list_empty(&pktgen_threads)) { 3359 printk("pktgen: ERROR: Initialization failed for all threads\n"); 3360 unregister_netdevice_notifier(&pktgen_notifier_block); 3361 remove_proc_entry(PGCTRL, pg_proc_dir); 3362 proc_net_remove(PG_PROC_DIR); 3363 return -ENODEV; 3364 } 3365 3366 return 0; 3367 } 3368 3369 static void __exit pg_cleanup(void) 3370 { 3371 struct pktgen_thread *t; 3372 struct list_head *q, *n; 3373 wait_queue_head_t queue; 3374 init_waitqueue_head(&queue); 3375 3376 /* Stop all interfaces & threads */ 3377 3378 list_for_each_safe(q, n, &pktgen_threads) { 3379 t = list_entry(q, struct pktgen_thread, th_list); 3380 t->control |= (T_TERMINATE); 3381 3382 wait_event_interruptible_timeout(queue, (t->removed == 1), HZ); 3383 } 3384 3385 /* Un-register us from receiving netdevice events */ 3386 unregister_netdevice_notifier(&pktgen_notifier_block); 3387 3388 /* Clean up proc file system */ 3389 remove_proc_entry(PGCTRL, pg_proc_dir); 3390 proc_net_remove(PG_PROC_DIR); 3391 } 3392 3393 module_init(pg_init); 3394 module_exit(pg_cleanup); 3395 3396 MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>"); 3397 MODULE_DESCRIPTION("Packet Generator tool"); 3398 MODULE_LICENSE("GPL"); 3399 module_param(pg_count_d, int, 0); 3400 module_param(pg_delay_d, int, 0); 3401 module_param(pg_clone_skb_d, int, 0); 3402 module_param(debug, int, 0); 3403
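/*
 * Example (a sketch, not part of the original file): the four module
 * parameters registered above can be overridden when the module is loaded,
 * e.g.
 *
 *   modprobe pktgen pg_count_d=100000 pg_delay_d=0 pg_clone_skb_d=0 debug=1
 *
 * assuming the module is built as pktgen.ko. Per-thread and per-interface
 * settings are then adjusted through the proc entries created by pg_init(),
 * pktgen_create_thread() and pktgen_add_device() above.
 */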