1 /* 2 * Equalizer Load-balancer for serial network interfaces. 3 * 4 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes 5 * NCM: Network and Communications Management, Inc. 6 * 7 * (c) Copyright 2002 David S. Miller (davem@redhat.com) 8 * 9 * This software may be used and distributed according to the terms 10 * of the GNU General Public License, incorporated herein by reference. 11 * 12 * The author may be reached as simon@ncm.com, or C/O 13 * NCM 14 * Attn: Simon Janes 15 * 6803 Whittier Ave 16 * McLean VA 22101 17 * Phone: 1-703-847-0040 ext 103 18 */ 19 20 /* 21 * Sources: 22 * skeleton.c by Donald Becker. 23 * Inspirations: 24 * The Harried and Overworked Alan Cox 25 * Conspiracies: 26 * The Alan Cox and Mike McLagan plot to get someone else to do the code, 27 * which turned out to be me. 28 */ 29 30 /* 31 * $Log: eql.c,v $ 32 * Revision 1.2 1996/04/11 17:51:52 guru 33 * Added one-line eql_remove_slave patch. 34 * 35 * Revision 1.1 1996/04/11 17:44:17 guru 36 * Initial revision 37 * 38 * Revision 3.13 1996/01/21 15:17:18 alan 39 * tx_queue_len changes. 40 * reformatted. 41 * 42 * Revision 3.12 1995/03/22 21:07:51 anarchy 43 * Added capable() checks on configuration. 44 * Moved header file. 45 * 46 * Revision 3.11 1995/01/19 23:14:31 guru 47 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 48 * (priority_Bps) + bytes_queued * 8; 49 * 50 * Revision 3.10 1995/01/19 23:07:53 guru 51 * back to 52 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 53 * (priority_Bps) + bytes_queued; 54 * 55 * Revision 3.9 1995/01/19 22:38:20 guru 56 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 57 * (priority_Bps) + bytes_queued * 4; 58 * 59 * Revision 3.8 1995/01/19 22:30:55 guru 60 * slave_load = (ULONG_MAX - (ULONG_MAX / 2)) - 61 * (priority_Bps) + bytes_queued * 2; 62 * 63 * Revision 3.7 1995/01/19 21:52:35 guru 64 * printk's trimmed out. 65 * 66 * Revision 3.6 1995/01/19 21:49:56 guru 67 * This is working pretty well. I gained 1 K/s in speed.. now it's just 68 * robustness and printk's to be diked out. 69 * 70 * Revision 3.5 1995/01/18 22:29:59 guru 71 * still crashes the kernel when the lock_wait thing is woken up. 72 * 73 * Revision 3.4 1995/01/18 21:59:47 guru 74 * Broken set-bit locking snapshot 75 * 76 * Revision 3.3 1995/01/17 22:09:18 guru 77 * infinite sleep in a lock somewhere.. 78 * 79 * Revision 3.2 1995/01/15 16:46:06 guru 80 * Log trimmed of non-pertinent 1.x branch messages 81 * 82 * Revision 3.1 1995/01/15 14:41:45 guru 83 * New Scheduler and timer stuff... 84 * 85 * Revision 1.15 1995/01/15 14:29:02 guru 86 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one 87 * with the dumber scheduler 88 * 89 * Revision 1.14 1995/01/15 02:37:08 guru 90 * shock.. the kept-new-versions could have zonked working 91 * stuff.. shudder 92 * 93 * Revision 1.13 1995/01/15 02:36:31 guru 94 * big changes 95 * 96 * scheduler was torn out and replaced with something smarter 97 * 98 * global names not prefixed with eql_ were renamed to protect 99 * against namespace collisions 100 * 101 * a few more abstract interfaces were added to facilitate any 102 * potential change of datastructure. the driver is still using 103 * a linked list of slaves. going to a heap would be a bit of 104 * an overkill. 105 * 106 * this compiles fine with no warnings. 107 * 108 * the locking mechanism and timer stuff must be written however, 109 * this version will not work otherwise 110 * 111 * Sorry, I had to rewrite most of this for 2.5.x -DaveM 112 */ 113 114 #include <linux/capability.h> 115 #include <linux/module.h> 116 #include <linux/kernel.h> 117 #include <linux/init.h> 118 #include <linux/slab.h> 119 #include <linux/timer.h> 120 #include <linux/netdevice.h> 121 #include <net/net_namespace.h> 122 123 #include <linux/if.h> 124 #include <linux/if_arp.h> 125 #include <linux/if_eql.h> 126 127 #include <asm/uaccess.h> 128 129 static int eql_open(struct net_device *dev); 130 static int eql_close(struct net_device *dev); 131 static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 132 static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev); 133 134 #define eql_is_slave(dev) ((dev->flags & IFF_SLAVE) == IFF_SLAVE) 135 #define eql_is_master(dev) ((dev->flags & IFF_MASTER) == IFF_MASTER) 136 137 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave); 138 139 static void eql_timer(unsigned long param) 140 { 141 equalizer_t *eql = (equalizer_t *) param; 142 struct list_head *this, *tmp, *head; 143 144 spin_lock_bh(&eql->queue.lock); 145 head = &eql->queue.all_slaves; 146 list_for_each_safe(this, tmp, head) { 147 slave_t *slave = list_entry(this, slave_t, list); 148 149 if ((slave->dev->flags & IFF_UP) == IFF_UP) { 150 slave->bytes_queued -= slave->priority_Bps; 151 if (slave->bytes_queued < 0) 152 slave->bytes_queued = 0; 153 } else { 154 eql_kill_one_slave(&eql->queue, slave); 155 } 156 157 } 158 spin_unlock_bh(&eql->queue.lock); 159 160 eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; 161 add_timer(&eql->timer); 162 } 163 164 static const char version[] __initconst = 165 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; 166 167 static const struct net_device_ops eql_netdev_ops = { 168 .ndo_open = eql_open, 169 .ndo_stop = eql_close, 170 .ndo_do_ioctl = eql_ioctl, 171 .ndo_start_xmit = eql_slave_xmit, 172 }; 173 174 static void __init eql_setup(struct net_device *dev) 175 { 176 equalizer_t *eql = netdev_priv(dev); 177 178 init_timer(&eql->timer); 179 eql->timer.data = (unsigned long) eql; 180 eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL; 181 eql->timer.function = eql_timer; 182 183 spin_lock_init(&eql->queue.lock); 184 INIT_LIST_HEAD(&eql->queue.all_slaves); 185 eql->queue.master_dev = dev; 186 187 dev->netdev_ops = &eql_netdev_ops; 188 189 /* 190 * Now we undo some of the things that eth_setup does 191 * that we don't like 192 */ 193 194 dev->mtu = EQL_DEFAULT_MTU; /* set to 576 in if_eql.h */ 195 dev->flags = IFF_MASTER; 196 197 dev->type = ARPHRD_SLIP; 198 dev->tx_queue_len = 5; /* Hands them off fast */ 199 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 200 } 201 202 static int eql_open(struct net_device *dev) 203 { 204 equalizer_t *eql = netdev_priv(dev); 205 206 /* XXX We should force this off automatically for the user. */ 207 printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " 208 "your slave devices.\n", dev->name); 209 210 BUG_ON(!list_empty(&eql->queue.all_slaves)); 211 212 eql->min_slaves = 1; 213 eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */ 214 215 add_timer(&eql->timer); 216 217 return 0; 218 } 219 220 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) 221 { 222 list_del(&slave->list); 223 queue->num_slaves--; 224 slave->dev->flags &= ~IFF_SLAVE; 225 dev_put(slave->dev); 226 kfree(slave); 227 } 228 229 static void eql_kill_slave_queue(slave_queue_t *queue) 230 { 231 struct list_head *head, *tmp, *this; 232 233 spin_lock_bh(&queue->lock); 234 235 head = &queue->all_slaves; 236 list_for_each_safe(this, tmp, head) { 237 slave_t *s = list_entry(this, slave_t, list); 238 239 eql_kill_one_slave(queue, s); 240 } 241 242 spin_unlock_bh(&queue->lock); 243 } 244 245 static int eql_close(struct net_device *dev) 246 { 247 equalizer_t *eql = netdev_priv(dev); 248 249 /* 250 * The timer has to be stopped first before we start hacking away 251 * at the data structure it scans every so often... 252 */ 253 254 del_timer_sync(&eql->timer); 255 256 eql_kill_slave_queue(&eql->queue); 257 258 return 0; 259 } 260 261 static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq); 262 static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq); 263 264 static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc); 265 static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc); 266 267 static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc); 268 static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc); 269 270 static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 271 { 272 if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG && 273 !capable(CAP_NET_ADMIN)) 274 return -EPERM; 275 276 switch (cmd) { 277 case EQL_ENSLAVE: 278 return eql_enslave(dev, ifr->ifr_data); 279 case EQL_EMANCIPATE: 280 return eql_emancipate(dev, ifr->ifr_data); 281 case EQL_GETSLAVECFG: 282 return eql_g_slave_cfg(dev, ifr->ifr_data); 283 case EQL_SETSLAVECFG: 284 return eql_s_slave_cfg(dev, ifr->ifr_data); 285 case EQL_GETMASTRCFG: 286 return eql_g_master_cfg(dev, ifr->ifr_data); 287 case EQL_SETMASTRCFG: 288 return eql_s_master_cfg(dev, ifr->ifr_data); 289 default: 290 return -EOPNOTSUPP; 291 } 292 } 293 294 /* queue->lock must be held */ 295 static slave_t *__eql_schedule_slaves(slave_queue_t *queue) 296 { 297 unsigned long best_load = ~0UL; 298 struct list_head *this, *tmp, *head; 299 slave_t *best_slave; 300 301 best_slave = NULL; 302 303 /* Make a pass to set the best slave. */ 304 head = &queue->all_slaves; 305 list_for_each_safe(this, tmp, head) { 306 slave_t *slave = list_entry(this, slave_t, list); 307 unsigned long slave_load, bytes_queued, priority_Bps; 308 309 /* Go through the slave list once, updating best_slave 310 * whenever a new best_load is found. 311 */ 312 bytes_queued = slave->bytes_queued; 313 priority_Bps = slave->priority_Bps; 314 if ((slave->dev->flags & IFF_UP) == IFF_UP) { 315 slave_load = (~0UL - (~0UL / 2)) - 316 (priority_Bps) + bytes_queued * 8; 317 318 if (slave_load < best_load) { 319 best_load = slave_load; 320 best_slave = slave; 321 } 322 } else { 323 /* We found a dead slave, kill it. */ 324 eql_kill_one_slave(queue, slave); 325 } 326 } 327 return best_slave; 328 } 329 330 static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev) 331 { 332 equalizer_t *eql = netdev_priv(dev); 333 slave_t *slave; 334 335 spin_lock(&eql->queue.lock); 336 337 slave = __eql_schedule_slaves(&eql->queue); 338 if (slave) { 339 struct net_device *slave_dev = slave->dev; 340 341 skb->dev = slave_dev; 342 skb->priority = 1; 343 slave->bytes_queued += skb->len; 344 dev_queue_xmit(skb); 345 dev->stats.tx_packets++; 346 } else { 347 dev->stats.tx_dropped++; 348 dev_kfree_skb(skb); 349 } 350 351 spin_unlock(&eql->queue.lock); 352 353 return NETDEV_TX_OK; 354 } 355 356 /* 357 * Private ioctl functions 358 */ 359 360 /* queue->lock must be held */ 361 static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev) 362 { 363 struct list_head *this, *head; 364 365 head = &queue->all_slaves; 366 list_for_each(this, head) { 367 slave_t *slave = list_entry(this, slave_t, list); 368 369 if (slave->dev == dev) 370 return slave; 371 } 372 373 return NULL; 374 } 375 376 static inline int eql_is_full(slave_queue_t *queue) 377 { 378 equalizer_t *eql = netdev_priv(queue->master_dev); 379 380 if (queue->num_slaves >= eql->max_slaves) 381 return 1; 382 return 0; 383 } 384 385 /* queue->lock must be held */ 386 static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) 387 { 388 if (!eql_is_full(queue)) { 389 slave_t *duplicate_slave = NULL; 390 391 duplicate_slave = __eql_find_slave_dev(queue, slave->dev); 392 if (duplicate_slave) 393 eql_kill_one_slave(queue, duplicate_slave); 394 395 list_add(&slave->list, &queue->all_slaves); 396 queue->num_slaves++; 397 slave->dev->flags |= IFF_SLAVE; 398 399 return 0; 400 } 401 402 return -ENOSPC; 403 } 404 405 static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp) 406 { 407 struct net_device *slave_dev; 408 slaving_request_t srq; 409 410 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) 411 return -EFAULT; 412 413 slave_dev = dev_get_by_name(&init_net, srq.slave_name); 414 if (slave_dev) { 415 if ((master_dev->flags & IFF_UP) == IFF_UP) { 416 /* slave is not a master & not already a slave: */ 417 if (!eql_is_master(slave_dev) && 418 !eql_is_slave(slave_dev)) { 419 slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL); 420 equalizer_t *eql = netdev_priv(master_dev); 421 int ret; 422 423 if (!s) { 424 dev_put(slave_dev); 425 return -ENOMEM; 426 } 427 428 memset(s, 0, sizeof(*s)); 429 s->dev = slave_dev; 430 s->priority = srq.priority; 431 s->priority_bps = srq.priority; 432 s->priority_Bps = srq.priority / 8; 433 434 spin_lock_bh(&eql->queue.lock); 435 ret = __eql_insert_slave(&eql->queue, s); 436 if (ret) { 437 dev_put(slave_dev); 438 kfree(s); 439 } 440 spin_unlock_bh(&eql->queue.lock); 441 442 return ret; 443 } 444 } 445 dev_put(slave_dev); 446 } 447 448 return -EINVAL; 449 } 450 451 static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp) 452 { 453 equalizer_t *eql = netdev_priv(master_dev); 454 struct net_device *slave_dev; 455 slaving_request_t srq; 456 int ret; 457 458 if (copy_from_user(&srq, srqp, sizeof (slaving_request_t))) 459 return -EFAULT; 460 461 slave_dev = dev_get_by_name(&init_net, srq.slave_name); 462 ret = -EINVAL; 463 if (slave_dev) { 464 spin_lock_bh(&eql->queue.lock); 465 466 if (eql_is_slave(slave_dev)) { 467 slave_t *slave = __eql_find_slave_dev(&eql->queue, 468 slave_dev); 469 470 if (slave) { 471 eql_kill_one_slave(&eql->queue, slave); 472 ret = 0; 473 } 474 } 475 dev_put(slave_dev); 476 477 spin_unlock_bh(&eql->queue.lock); 478 } 479 480 return ret; 481 } 482 483 static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp) 484 { 485 equalizer_t *eql = netdev_priv(dev); 486 slave_t *slave; 487 struct net_device *slave_dev; 488 slave_config_t sc; 489 int ret; 490 491 if (copy_from_user(&sc, scp, sizeof (slave_config_t))) 492 return -EFAULT; 493 494 slave_dev = dev_get_by_name(&init_net, sc.slave_name); 495 if (!slave_dev) 496 return -ENODEV; 497 498 ret = -EINVAL; 499 500 spin_lock_bh(&eql->queue.lock); 501 if (eql_is_slave(slave_dev)) { 502 slave = __eql_find_slave_dev(&eql->queue, slave_dev); 503 if (slave) { 504 sc.priority = slave->priority; 505 ret = 0; 506 } 507 } 508 spin_unlock_bh(&eql->queue.lock); 509 510 dev_put(slave_dev); 511 512 if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t))) 513 ret = -EFAULT; 514 515 return ret; 516 } 517 518 static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) 519 { 520 slave_t *slave; 521 equalizer_t *eql; 522 struct net_device *slave_dev; 523 slave_config_t sc; 524 int ret; 525 526 if (copy_from_user(&sc, scp, sizeof (slave_config_t))) 527 return -EFAULT; 528 529 slave_dev = dev_get_by_name(&init_net, sc.slave_name); 530 if (!slave_dev) 531 return -ENODEV; 532 533 ret = -EINVAL; 534 535 eql = netdev_priv(dev); 536 spin_lock_bh(&eql->queue.lock); 537 if (eql_is_slave(slave_dev)) { 538 slave = __eql_find_slave_dev(&eql->queue, slave_dev); 539 if (slave) { 540 slave->priority = sc.priority; 541 slave->priority_bps = sc.priority; 542 slave->priority_Bps = sc.priority / 8; 543 ret = 0; 544 } 545 } 546 spin_unlock_bh(&eql->queue.lock); 547 548 dev_put(slave_dev); 549 550 return ret; 551 } 552 553 static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) 554 { 555 equalizer_t *eql; 556 master_config_t mc; 557 558 if (eql_is_master(dev)) { 559 eql = netdev_priv(dev); 560 mc.max_slaves = eql->max_slaves; 561 mc.min_slaves = eql->min_slaves; 562 if (copy_to_user(mcp, &mc, sizeof (master_config_t))) 563 return -EFAULT; 564 return 0; 565 } 566 return -EINVAL; 567 } 568 569 static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp) 570 { 571 equalizer_t *eql; 572 master_config_t mc; 573 574 if (copy_from_user(&mc, mcp, sizeof (master_config_t))) 575 return -EFAULT; 576 577 if (eql_is_master(dev)) { 578 eql = netdev_priv(dev); 579 eql->max_slaves = mc.max_slaves; 580 eql->min_slaves = mc.min_slaves; 581 return 0; 582 } 583 return -EINVAL; 584 } 585 586 static struct net_device *dev_eql; 587 588 static int __init eql_init_module(void) 589 { 590 int err; 591 592 printk(version); 593 594 dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup); 595 if (!dev_eql) 596 return -ENOMEM; 597 598 err = register_netdev(dev_eql); 599 if (err) 600 free_netdev(dev_eql); 601 return err; 602 } 603 604 static void __exit eql_cleanup_module(void) 605 { 606 unregister_netdev(dev_eql); 607 free_netdev(dev_eql); 608 } 609 610 module_init(eql_init_module); 611 module_exit(eql_cleanup_module); 612 MODULE_LICENSE("GPL"); 613