1 /* 2 * Generic HDLC support routines for Linux 3 * Frame Relay support 4 * 5 * Copyright (C) 1999 - 2005 Krzysztof Halasa <khc@pm.waw.pl> 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms of version 2 of the GNU General Public License 9 * as published by the Free Software Foundation. 10 * 11 12 Theory of PVC state 13 14 DCE mode: 15 16 (exist,new) -> 0,0 when "PVC create" or if "link unreliable" 17 0,x -> 1,1 if "link reliable" when sending FULL STATUS 18 1,1 -> 1,0 if received FULL STATUS ACK 19 20 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" 21 -> 1 when "PVC up" and (exist,new) = 1,0 22 23 DTE mode: 24 (exist,new,active) = FULL STATUS if "link reliable" 25 = 0, 0, 0 if "link unreliable" 26 No LMI: 27 active = open and "link reliable" 28 exist = new = not used 29 30 CCITT LMI: ITU-T Q.933 Annex A 31 ANSI LMI: ANSI T1.617 Annex D 32 CISCO LMI: the original, aka "Gang of Four" LMI 33 34 */ 35 36 #include <linux/module.h> 37 #include <linux/kernel.h> 38 #include <linux/slab.h> 39 #include <linux/poll.h> 40 #include <linux/errno.h> 41 #include <linux/if_arp.h> 42 #include <linux/init.h> 43 #include <linux/skbuff.h> 44 #include <linux/pkt_sched.h> 45 #include <linux/random.h> 46 #include <linux/inetdevice.h> 47 #include <linux/lapb.h> 48 #include <linux/rtnetlink.h> 49 #include <linux/etherdevice.h> 50 #include <linux/hdlc.h> 51 52 #undef DEBUG_PKT 53 #undef DEBUG_ECN 54 #undef DEBUG_LINK 55 56 #define FR_UI 0x03 57 #define FR_PAD 0x00 58 59 #define NLPID_IP 0xCC 60 #define NLPID_IPV6 0x8E 61 #define NLPID_SNAP 0x80 62 #define NLPID_PAD 0x00 63 #define NLPID_CCITT_ANSI_LMI 0x08 64 #define NLPID_CISCO_LMI 0x09 65 66 67 #define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */ 68 #define LMI_CISCO_DLCI 1023 69 70 #define LMI_CALLREF 0x00 /* Call Reference */ 71 #define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */ 72 #define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */ 73 #define LMI_CCITT_REPTYPE 0x51 74 #define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */ 75 #define LMI_CCITT_ALIVE 0x53 76 #define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */ 77 #define LMI_CCITT_PVCSTAT 0x57 78 79 #define LMI_FULLREP 0x00 /* full report */ 80 #define LMI_INTEGRITY 0x01 /* link integrity report */ 81 #define LMI_SINGLE 0x02 /* single PVC report */ 82 83 #define LMI_STATUS_ENQUIRY 0x75 84 #define LMI_STATUS 0x7D /* reply */ 85 86 #define LMI_REPT_LEN 1 /* report type element length */ 87 #define LMI_INTEG_LEN 2 /* link integrity element length */ 88 89 #define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */ 90 #define LMI_ANSI_LENGTH 14 91 92 93 typedef struct { 94 #if defined(__LITTLE_ENDIAN_BITFIELD) 95 unsigned ea1: 1; 96 unsigned cr: 1; 97 unsigned dlcih: 6; 98 99 unsigned ea2: 1; 100 unsigned de: 1; 101 unsigned becn: 1; 102 unsigned fecn: 1; 103 unsigned dlcil: 4; 104 #else 105 unsigned dlcih: 6; 106 unsigned cr: 1; 107 unsigned ea1: 1; 108 109 unsigned dlcil: 4; 110 unsigned fecn: 1; 111 unsigned becn: 1; 112 unsigned de: 1; 113 unsigned ea2: 1; 114 #endif 115 }__attribute__ ((packed)) fr_hdr; 116 117 118 static inline u16 q922_to_dlci(u8 *hdr) 119 { 120 return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); 121 } 122 123 124 125 static inline void dlci_to_q922(u8 *hdr, u16 dlci) 126 { 127 hdr[0] = (dlci >> 2) & 0xFC; 128 hdr[1] = ((dlci << 4) & 0xF0) | 0x01; 129 } 130 131 132 133 static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) 134 { 135 pvc_device *pvc = hdlc->state.fr.first_pvc; 136 137 while (pvc) { 138 if (pvc->dlci == dlci) 139 return pvc; 140 if (pvc->dlci > dlci) 141 return NULL; /* the listed is sorted */ 142 pvc = pvc->next; 143 } 144 145 return NULL; 146 } 147 148 149 static inline pvc_device* add_pvc(struct net_device *dev, u16 dlci) 150 { 151 hdlc_device *hdlc = dev_to_hdlc(dev); 152 pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc; 153 154 while (*pvc_p) { 155 if ((*pvc_p)->dlci == dlci) 156 return *pvc_p; 157 if ((*pvc_p)->dlci > dlci) 158 break; /* the list is sorted */ 159 pvc_p = &(*pvc_p)->next; 160 } 161 162 pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC); 163 if (!pvc) 164 return NULL; 165 166 memset(pvc, 0, sizeof(pvc_device)); 167 pvc->dlci = dlci; 168 pvc->master = dev; 169 pvc->next = *pvc_p; /* Put it in the chain */ 170 *pvc_p = pvc; 171 return pvc; 172 } 173 174 175 static inline int pvc_is_used(pvc_device *pvc) 176 { 177 return pvc->main != NULL || pvc->ether != NULL; 178 } 179 180 181 static inline void pvc_carrier(int on, pvc_device *pvc) 182 { 183 if (on) { 184 if (pvc->main) 185 if (!netif_carrier_ok(pvc->main)) 186 netif_carrier_on(pvc->main); 187 if (pvc->ether) 188 if (!netif_carrier_ok(pvc->ether)) 189 netif_carrier_on(pvc->ether); 190 } else { 191 if (pvc->main) 192 if (netif_carrier_ok(pvc->main)) 193 netif_carrier_off(pvc->main); 194 if (pvc->ether) 195 if (netif_carrier_ok(pvc->ether)) 196 netif_carrier_off(pvc->ether); 197 } 198 } 199 200 201 static inline void delete_unused_pvcs(hdlc_device *hdlc) 202 { 203 pvc_device **pvc_p = &hdlc->state.fr.first_pvc; 204 205 while (*pvc_p) { 206 if (!pvc_is_used(*pvc_p)) { 207 pvc_device *pvc = *pvc_p; 208 *pvc_p = pvc->next; 209 kfree(pvc); 210 continue; 211 } 212 pvc_p = &(*pvc_p)->next; 213 } 214 } 215 216 217 static inline struct net_device** get_dev_p(pvc_device *pvc, int type) 218 { 219 if (type == ARPHRD_ETHER) 220 return &pvc->ether; 221 else 222 return &pvc->main; 223 } 224 225 226 static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) 227 { 228 u16 head_len; 229 struct sk_buff *skb = *skb_p; 230 231 switch (skb->protocol) { 232 case __constant_ntohs(NLPID_CCITT_ANSI_LMI): 233 head_len = 4; 234 skb_push(skb, head_len); 235 skb->data[3] = NLPID_CCITT_ANSI_LMI; 236 break; 237 238 case __constant_ntohs(NLPID_CISCO_LMI): 239 head_len = 4; 240 skb_push(skb, head_len); 241 skb->data[3] = NLPID_CISCO_LMI; 242 break; 243 244 case __constant_ntohs(ETH_P_IP): 245 head_len = 4; 246 skb_push(skb, head_len); 247 skb->data[3] = NLPID_IP; 248 break; 249 250 case __constant_ntohs(ETH_P_IPV6): 251 head_len = 4; 252 skb_push(skb, head_len); 253 skb->data[3] = NLPID_IPV6; 254 break; 255 256 case __constant_ntohs(ETH_P_802_3): 257 head_len = 10; 258 if (skb_headroom(skb) < head_len) { 259 struct sk_buff *skb2 = skb_realloc_headroom(skb, 260 head_len); 261 if (!skb2) 262 return -ENOBUFS; 263 dev_kfree_skb(skb); 264 skb = *skb_p = skb2; 265 } 266 skb_push(skb, head_len); 267 skb->data[3] = FR_PAD; 268 skb->data[4] = NLPID_SNAP; 269 skb->data[5] = FR_PAD; 270 skb->data[6] = 0x80; 271 skb->data[7] = 0xC2; 272 skb->data[8] = 0x00; 273 skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ 274 break; 275 276 default: 277 head_len = 10; 278 skb_push(skb, head_len); 279 skb->data[3] = FR_PAD; 280 skb->data[4] = NLPID_SNAP; 281 skb->data[5] = FR_PAD; 282 skb->data[6] = FR_PAD; 283 skb->data[7] = FR_PAD; 284 *(u16*)(skb->data + 8) = skb->protocol; 285 } 286 287 dlci_to_q922(skb->data, dlci); 288 skb->data[2] = FR_UI; 289 return 0; 290 } 291 292 293 294 static int pvc_open(struct net_device *dev) 295 { 296 pvc_device *pvc = dev_to_pvc(dev); 297 298 if ((pvc->master->flags & IFF_UP) == 0) 299 return -EIO; /* Master must be UP in order to activate PVC */ 300 301 if (pvc->open_count++ == 0) { 302 hdlc_device *hdlc = dev_to_hdlc(pvc->master); 303 if (hdlc->state.fr.settings.lmi == LMI_NONE) 304 pvc->state.active = hdlc->carrier; 305 306 pvc_carrier(pvc->state.active, pvc); 307 hdlc->state.fr.dce_changed = 1; 308 } 309 return 0; 310 } 311 312 313 314 static int pvc_close(struct net_device *dev) 315 { 316 pvc_device *pvc = dev_to_pvc(dev); 317 318 if (--pvc->open_count == 0) { 319 hdlc_device *hdlc = dev_to_hdlc(pvc->master); 320 if (hdlc->state.fr.settings.lmi == LMI_NONE) 321 pvc->state.active = 0; 322 323 if (hdlc->state.fr.settings.dce) { 324 hdlc->state.fr.dce_changed = 1; 325 pvc->state.active = 0; 326 } 327 } 328 return 0; 329 } 330 331 332 333 int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 334 { 335 pvc_device *pvc = dev_to_pvc(dev); 336 fr_proto_pvc_info info; 337 338 if (ifr->ifr_settings.type == IF_GET_PROTO) { 339 if (dev->type == ARPHRD_ETHER) 340 ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC; 341 else 342 ifr->ifr_settings.type = IF_PROTO_FR_PVC; 343 344 if (ifr->ifr_settings.size < sizeof(info)) { 345 /* data size wanted */ 346 ifr->ifr_settings.size = sizeof(info); 347 return -ENOBUFS; 348 } 349 350 info.dlci = pvc->dlci; 351 memcpy(info.master, pvc->master->name, IFNAMSIZ); 352 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, 353 &info, sizeof(info))) 354 return -EFAULT; 355 return 0; 356 } 357 358 return -EINVAL; 359 } 360 361 362 static inline struct net_device_stats *pvc_get_stats(struct net_device *dev) 363 { 364 return netdev_priv(dev); 365 } 366 367 368 369 static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) 370 { 371 pvc_device *pvc = dev_to_pvc(dev); 372 struct net_device_stats *stats = pvc_get_stats(dev); 373 374 if (pvc->state.active) { 375 if (dev->type == ARPHRD_ETHER) { 376 int pad = ETH_ZLEN - skb->len; 377 if (pad > 0) { /* Pad the frame with zeros */ 378 int len = skb->len; 379 if (skb_tailroom(skb) < pad) 380 if (pskb_expand_head(skb, 0, pad, 381 GFP_ATOMIC)) { 382 stats->tx_dropped++; 383 dev_kfree_skb(skb); 384 return 0; 385 } 386 skb_put(skb, pad); 387 memset(skb->data + len, 0, pad); 388 } 389 skb->protocol = __constant_htons(ETH_P_802_3); 390 } 391 if (!fr_hard_header(&skb, pvc->dlci)) { 392 stats->tx_bytes += skb->len; 393 stats->tx_packets++; 394 if (pvc->state.fecn) /* TX Congestion counter */ 395 stats->tx_compressed++; 396 skb->dev = pvc->master; 397 dev_queue_xmit(skb); 398 return 0; 399 } 400 } 401 402 stats->tx_dropped++; 403 dev_kfree_skb(skb); 404 return 0; 405 } 406 407 408 409 static int pvc_change_mtu(struct net_device *dev, int new_mtu) 410 { 411 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) 412 return -EINVAL; 413 dev->mtu = new_mtu; 414 return 0; 415 } 416 417 418 419 static inline void fr_log_dlci_active(pvc_device *pvc) 420 { 421 printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", 422 pvc->master->name, 423 pvc->dlci, 424 pvc->main ? pvc->main->name : "", 425 pvc->main && pvc->ether ? " " : "", 426 pvc->ether ? pvc->ether->name : "", 427 pvc->state.new ? " new" : "", 428 !pvc->state.exist ? "deleted" : 429 pvc->state.active ? "active" : "inactive"); 430 } 431 432 433 434 static inline u8 fr_lmi_nextseq(u8 x) 435 { 436 x++; 437 return x ? x : 1; 438 } 439 440 441 442 static void fr_lmi_send(struct net_device *dev, int fullrep) 443 { 444 hdlc_device *hdlc = dev_to_hdlc(dev); 445 struct sk_buff *skb; 446 pvc_device *pvc = hdlc->state.fr.first_pvc; 447 int lmi = hdlc->state.fr.settings.lmi; 448 int dce = hdlc->state.fr.settings.dce; 449 int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH; 450 int stat_len = (lmi == LMI_CISCO) ? 6 : 3; 451 u8 *data; 452 int i = 0; 453 454 if (dce && fullrep) { 455 len += hdlc->state.fr.dce_pvc_count * (2 + stat_len); 456 if (len > HDLC_MAX_MRU) { 457 printk(KERN_WARNING "%s: Too many PVCs while sending " 458 "LMI full report\n", dev->name); 459 return; 460 } 461 } 462 463 skb = dev_alloc_skb(len); 464 if (!skb) { 465 printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n", 466 dev->name); 467 return; 468 } 469 memset(skb->data, 0, len); 470 skb_reserve(skb, 4); 471 if (lmi == LMI_CISCO) { 472 skb->protocol = __constant_htons(NLPID_CISCO_LMI); 473 fr_hard_header(&skb, LMI_CISCO_DLCI); 474 } else { 475 skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); 476 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); 477 } 478 data = skb->tail; 479 data[i++] = LMI_CALLREF; 480 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; 481 if (lmi == LMI_ANSI) 482 data[i++] = LMI_ANSI_LOCKSHIFT; 483 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE : 484 LMI_ANSI_CISCO_REPTYPE; 485 data[i++] = LMI_REPT_LEN; 486 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY; 487 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE; 488 data[i++] = LMI_INTEG_LEN; 489 data[i++] = hdlc->state.fr.txseq =fr_lmi_nextseq(hdlc->state.fr.txseq); 490 data[i++] = hdlc->state.fr.rxseq; 491 492 if (dce && fullrep) { 493 while (pvc) { 494 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT : 495 LMI_ANSI_CISCO_PVCSTAT; 496 data[i++] = stat_len; 497 498 /* LMI start/restart */ 499 if (hdlc->state.fr.reliable && !pvc->state.exist) { 500 pvc->state.exist = pvc->state.new = 1; 501 fr_log_dlci_active(pvc); 502 } 503 504 /* ifconfig PVC up */ 505 if (pvc->open_count && !pvc->state.active && 506 pvc->state.exist && !pvc->state.new) { 507 pvc_carrier(1, pvc); 508 pvc->state.active = 1; 509 fr_log_dlci_active(pvc); 510 } 511 512 if (lmi == LMI_CISCO) { 513 data[i] = pvc->dlci >> 8; 514 data[i + 1] = pvc->dlci & 0xFF; 515 } else { 516 data[i] = (pvc->dlci >> 4) & 0x3F; 517 data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80; 518 data[i + 2] = 0x80; 519 } 520 521 if (pvc->state.new) 522 data[i + 2] |= 0x08; 523 else if (pvc->state.active) 524 data[i + 2] |= 0x02; 525 526 i += stat_len; 527 pvc = pvc->next; 528 } 529 } 530 531 skb_put(skb, i); 532 skb->priority = TC_PRIO_CONTROL; 533 skb->dev = dev; 534 skb->nh.raw = skb->data; 535 536 dev_queue_xmit(skb); 537 } 538 539 540 541 static void fr_set_link_state(int reliable, struct net_device *dev) 542 { 543 hdlc_device *hdlc = dev_to_hdlc(dev); 544 pvc_device *pvc = hdlc->state.fr.first_pvc; 545 546 hdlc->state.fr.reliable = reliable; 547 if (reliable) { 548 if (!netif_carrier_ok(dev)) 549 netif_carrier_on(dev); 550 551 hdlc->state.fr.n391cnt = 0; /* Request full status */ 552 hdlc->state.fr.dce_changed = 1; 553 554 if (hdlc->state.fr.settings.lmi == LMI_NONE) { 555 while (pvc) { /* Activate all PVCs */ 556 pvc_carrier(1, pvc); 557 pvc->state.exist = pvc->state.active = 1; 558 pvc->state.new = 0; 559 pvc = pvc->next; 560 } 561 } 562 } else { 563 if (netif_carrier_ok(dev)) 564 netif_carrier_off(dev); 565 566 while (pvc) { /* Deactivate all PVCs */ 567 pvc_carrier(0, pvc); 568 pvc->state.exist = pvc->state.active = 0; 569 pvc->state.new = 0; 570 if (!hdlc->state.fr.settings.dce) 571 pvc->state.bandwidth = 0; 572 pvc = pvc->next; 573 } 574 } 575 } 576 577 578 579 static void fr_timer(unsigned long arg) 580 { 581 struct net_device *dev = (struct net_device *)arg; 582 hdlc_device *hdlc = dev_to_hdlc(dev); 583 int i, cnt = 0, reliable; 584 u32 list; 585 586 if (hdlc->state.fr.settings.dce) { 587 reliable = hdlc->state.fr.request && 588 time_before(jiffies, hdlc->state.fr.last_poll + 589 hdlc->state.fr.settings.t392 * HZ); 590 hdlc->state.fr.request = 0; 591 } else { 592 hdlc->state.fr.last_errors <<= 1; /* Shift the list */ 593 if (hdlc->state.fr.request) { 594 if (hdlc->state.fr.reliable) 595 printk(KERN_INFO "%s: No LMI status reply " 596 "received\n", dev->name); 597 hdlc->state.fr.last_errors |= 1; 598 } 599 600 list = hdlc->state.fr.last_errors; 601 for (i = 0; i < hdlc->state.fr.settings.n393; i++, list >>= 1) 602 cnt += (list & 1); /* errors count */ 603 604 reliable = (cnt < hdlc->state.fr.settings.n392); 605 } 606 607 if (hdlc->state.fr.reliable != reliable) { 608 printk(KERN_INFO "%s: Link %sreliable\n", dev->name, 609 reliable ? "" : "un"); 610 fr_set_link_state(reliable, dev); 611 } 612 613 if (hdlc->state.fr.settings.dce) 614 hdlc->state.fr.timer.expires = jiffies + 615 hdlc->state.fr.settings.t392 * HZ; 616 else { 617 if (hdlc->state.fr.n391cnt) 618 hdlc->state.fr.n391cnt--; 619 620 fr_lmi_send(dev, hdlc->state.fr.n391cnt == 0); 621 622 hdlc->state.fr.last_poll = jiffies; 623 hdlc->state.fr.request = 1; 624 hdlc->state.fr.timer.expires = jiffies + 625 hdlc->state.fr.settings.t391 * HZ; 626 } 627 628 hdlc->state.fr.timer.function = fr_timer; 629 hdlc->state.fr.timer.data = arg; 630 add_timer(&hdlc->state.fr.timer); 631 } 632 633 634 635 static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) 636 { 637 hdlc_device *hdlc = dev_to_hdlc(dev); 638 pvc_device *pvc; 639 u8 rxseq, txseq; 640 int lmi = hdlc->state.fr.settings.lmi; 641 int dce = hdlc->state.fr.settings.dce; 642 int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i; 643 644 if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH : 645 LMI_CCITT_CISCO_LENGTH)) { 646 printk(KERN_INFO "%s: Short LMI frame\n", dev->name); 647 return 1; 648 } 649 650 if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI : 651 NLPID_CCITT_ANSI_LMI)) { 652 printk(KERN_INFO "%s: Received non-LMI frame with LMI" 653 " DLCI\n", dev->name); 654 return 1; 655 } 656 657 if (skb->data[4] != LMI_CALLREF) { 658 printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n", 659 dev->name, skb->data[4]); 660 return 1; 661 } 662 663 if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) { 664 printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n", 665 dev->name, skb->data[5]); 666 return 1; 667 } 668 669 if (lmi == LMI_ANSI) { 670 if (skb->data[6] != LMI_ANSI_LOCKSHIFT) { 671 printk(KERN_INFO "%s: Not ANSI locking shift in LMI" 672 " message (0x%02X)\n", dev->name, skb->data[6]); 673 return 1; 674 } 675 i = 7; 676 } else 677 i = 6; 678 679 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE : 680 LMI_ANSI_CISCO_REPTYPE)) { 681 printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n", 682 dev->name, skb->data[i]); 683 return 1; 684 } 685 686 if (skb->data[++i] != LMI_REPT_LEN) { 687 printk(KERN_INFO "%s: Invalid LMI Report type IE length" 688 " (%u)\n", dev->name, skb->data[i]); 689 return 1; 690 } 691 692 reptype = skb->data[++i]; 693 if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) { 694 printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n", 695 dev->name, reptype); 696 return 1; 697 } 698 699 if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE : 700 LMI_ANSI_CISCO_ALIVE)) { 701 printk(KERN_INFO "%s: Not an LMI Link integrity verification" 702 " IE (0x%02X)\n", dev->name, skb->data[i]); 703 return 1; 704 } 705 706 if (skb->data[++i] != LMI_INTEG_LEN) { 707 printk(KERN_INFO "%s: Invalid LMI Link integrity verification" 708 " IE length (%u)\n", dev->name, skb->data[i]); 709 return 1; 710 } 711 i++; 712 713 hdlc->state.fr.rxseq = skb->data[i++]; /* TX sequence from peer */ 714 rxseq = skb->data[i++]; /* Should confirm our sequence */ 715 716 txseq = hdlc->state.fr.txseq; 717 718 if (dce) 719 hdlc->state.fr.last_poll = jiffies; 720 721 error = 0; 722 if (!hdlc->state.fr.reliable) 723 error = 1; 724 725 if (rxseq == 0 || rxseq != txseq) { 726 hdlc->state.fr.n391cnt = 0; /* Ask for full report next time */ 727 error = 1; 728 } 729 730 if (dce) { 731 if (hdlc->state.fr.fullrep_sent && !error) { 732 /* Stop sending full report - the last one has been confirmed by DTE */ 733 hdlc->state.fr.fullrep_sent = 0; 734 pvc = hdlc->state.fr.first_pvc; 735 while (pvc) { 736 if (pvc->state.new) { 737 pvc->state.new = 0; 738 739 /* Tell DTE that new PVC is now active */ 740 hdlc->state.fr.dce_changed = 1; 741 } 742 pvc = pvc->next; 743 } 744 } 745 746 if (hdlc->state.fr.dce_changed) { 747 reptype = LMI_FULLREP; 748 hdlc->state.fr.fullrep_sent = 1; 749 hdlc->state.fr.dce_changed = 0; 750 } 751 752 hdlc->state.fr.request = 1; /* got request */ 753 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0); 754 return 0; 755 } 756 757 /* DTE */ 758 759 hdlc->state.fr.request = 0; /* got response, no request pending */ 760 761 if (error) 762 return 0; 763 764 if (reptype != LMI_FULLREP) 765 return 0; 766 767 pvc = hdlc->state.fr.first_pvc; 768 769 while (pvc) { 770 pvc->state.deleted = 1; 771 pvc = pvc->next; 772 } 773 774 no_ram = 0; 775 while (skb->len >= i + 2 + stat_len) { 776 u16 dlci; 777 u32 bw; 778 unsigned int active, new; 779 780 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT : 781 LMI_ANSI_CISCO_PVCSTAT)) { 782 printk(KERN_INFO "%s: Not an LMI PVC status IE" 783 " (0x%02X)\n", dev->name, skb->data[i]); 784 return 1; 785 } 786 787 if (skb->data[++i] != stat_len) { 788 printk(KERN_INFO "%s: Invalid LMI PVC status IE length" 789 " (%u)\n", dev->name, skb->data[i]); 790 return 1; 791 } 792 i++; 793 794 new = !! (skb->data[i + 2] & 0x08); 795 active = !! (skb->data[i + 2] & 0x02); 796 if (lmi == LMI_CISCO) { 797 dlci = (skb->data[i] << 8) | skb->data[i + 1]; 798 bw = (skb->data[i + 3] << 16) | 799 (skb->data[i + 4] << 8) | 800 (skb->data[i + 5]); 801 } else { 802 dlci = ((skb->data[i] & 0x3F) << 4) | 803 ((skb->data[i + 1] & 0x78) >> 3); 804 bw = 0; 805 } 806 807 pvc = add_pvc(dev, dlci); 808 809 if (!pvc && !no_ram) { 810 printk(KERN_WARNING 811 "%s: Memory squeeze on fr_lmi_recv()\n", 812 dev->name); 813 no_ram = 1; 814 } 815 816 if (pvc) { 817 pvc->state.exist = 1; 818 pvc->state.deleted = 0; 819 if (active != pvc->state.active || 820 new != pvc->state.new || 821 bw != pvc->state.bandwidth || 822 !pvc->state.exist) { 823 pvc->state.new = new; 824 pvc->state.active = active; 825 pvc->state.bandwidth = bw; 826 pvc_carrier(active, pvc); 827 fr_log_dlci_active(pvc); 828 } 829 } 830 831 i += stat_len; 832 } 833 834 pvc = hdlc->state.fr.first_pvc; 835 836 while (pvc) { 837 if (pvc->state.deleted && pvc->state.exist) { 838 pvc_carrier(0, pvc); 839 pvc->state.active = pvc->state.new = 0; 840 pvc->state.exist = 0; 841 pvc->state.bandwidth = 0; 842 fr_log_dlci_active(pvc); 843 } 844 pvc = pvc->next; 845 } 846 847 /* Next full report after N391 polls */ 848 hdlc->state.fr.n391cnt = hdlc->state.fr.settings.n391; 849 850 return 0; 851 } 852 853 854 855 static int fr_rx(struct sk_buff *skb) 856 { 857 struct net_device *ndev = skb->dev; 858 hdlc_device *hdlc = dev_to_hdlc(ndev); 859 fr_hdr *fh = (fr_hdr*)skb->data; 860 u8 *data = skb->data; 861 u16 dlci; 862 pvc_device *pvc; 863 struct net_device *dev = NULL; 864 865 if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) 866 goto rx_error; 867 868 dlci = q922_to_dlci(skb->data); 869 870 if ((dlci == LMI_CCITT_ANSI_DLCI && 871 (hdlc->state.fr.settings.lmi == LMI_ANSI || 872 hdlc->state.fr.settings.lmi == LMI_CCITT)) || 873 (dlci == LMI_CISCO_DLCI && 874 hdlc->state.fr.settings.lmi == LMI_CISCO)) { 875 if (fr_lmi_recv(ndev, skb)) 876 goto rx_error; 877 dev_kfree_skb_any(skb); 878 return NET_RX_SUCCESS; 879 } 880 881 pvc = find_pvc(hdlc, dlci); 882 if (!pvc) { 883 #ifdef DEBUG_PKT 884 printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n", 885 ndev->name, dlci); 886 #endif 887 dev_kfree_skb_any(skb); 888 return NET_RX_DROP; 889 } 890 891 if (pvc->state.fecn != fh->fecn) { 892 #ifdef DEBUG_ECN 893 printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", ndev->name, 894 dlci, fh->fecn ? "N" : "FF"); 895 #endif 896 pvc->state.fecn ^= 1; 897 } 898 899 if (pvc->state.becn != fh->becn) { 900 #ifdef DEBUG_ECN 901 printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", ndev->name, 902 dlci, fh->becn ? "N" : "FF"); 903 #endif 904 pvc->state.becn ^= 1; 905 } 906 907 908 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 909 hdlc->stats.rx_dropped++; 910 return NET_RX_DROP; 911 } 912 913 if (data[3] == NLPID_IP) { 914 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ 915 dev = pvc->main; 916 skb->protocol = htons(ETH_P_IP); 917 918 } else if (data[3] == NLPID_IPV6) { 919 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ 920 dev = pvc->main; 921 skb->protocol = htons(ETH_P_IPV6); 922 923 } else if (skb->len > 10 && data[3] == FR_PAD && 924 data[4] == NLPID_SNAP && data[5] == FR_PAD) { 925 u16 oui = ntohs(*(u16*)(data + 6)); 926 u16 pid = ntohs(*(u16*)(data + 8)); 927 skb_pull(skb, 10); 928 929 switch ((((u32)oui) << 16) | pid) { 930 case ETH_P_ARP: /* routed frame with SNAP */ 931 case ETH_P_IPX: 932 case ETH_P_IP: /* a long variant */ 933 case ETH_P_IPV6: 934 dev = pvc->main; 935 skb->protocol = htons(pid); 936 break; 937 938 case 0x80C20007: /* bridged Ethernet frame */ 939 if ((dev = pvc->ether) != NULL) 940 skb->protocol = eth_type_trans(skb, dev); 941 break; 942 943 default: 944 printk(KERN_INFO "%s: Unsupported protocol, OUI=%x " 945 "PID=%x\n", ndev->name, oui, pid); 946 dev_kfree_skb_any(skb); 947 return NET_RX_DROP; 948 } 949 } else { 950 printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x " 951 "length = %i\n", ndev->name, data[3], skb->len); 952 dev_kfree_skb_any(skb); 953 return NET_RX_DROP; 954 } 955 956 if (dev) { 957 struct net_device_stats *stats = pvc_get_stats(dev); 958 stats->rx_packets++; /* PVC traffic */ 959 stats->rx_bytes += skb->len; 960 if (pvc->state.becn) 961 stats->rx_compressed++; 962 skb->dev = dev; 963 netif_rx(skb); 964 return NET_RX_SUCCESS; 965 } else { 966 dev_kfree_skb_any(skb); 967 return NET_RX_DROP; 968 } 969 970 rx_error: 971 hdlc->stats.rx_errors++; /* Mark error */ 972 dev_kfree_skb_any(skb); 973 return NET_RX_DROP; 974 } 975 976 977 978 static void fr_start(struct net_device *dev) 979 { 980 hdlc_device *hdlc = dev_to_hdlc(dev); 981 #ifdef DEBUG_LINK 982 printk(KERN_DEBUG "fr_start\n"); 983 #endif 984 if (hdlc->state.fr.settings.lmi != LMI_NONE) { 985 hdlc->state.fr.reliable = 0; 986 hdlc->state.fr.dce_changed = 1; 987 hdlc->state.fr.request = 0; 988 hdlc->state.fr.fullrep_sent = 0; 989 hdlc->state.fr.last_errors = 0xFFFFFFFF; 990 hdlc->state.fr.n391cnt = 0; 991 hdlc->state.fr.txseq = hdlc->state.fr.rxseq = 0; 992 993 init_timer(&hdlc->state.fr.timer); 994 /* First poll after 1 s */ 995 hdlc->state.fr.timer.expires = jiffies + HZ; 996 hdlc->state.fr.timer.function = fr_timer; 997 hdlc->state.fr.timer.data = (unsigned long)dev; 998 add_timer(&hdlc->state.fr.timer); 999 } else 1000 fr_set_link_state(1, dev); 1001 } 1002 1003 1004 1005 static void fr_stop(struct net_device *dev) 1006 { 1007 hdlc_device *hdlc = dev_to_hdlc(dev); 1008 #ifdef DEBUG_LINK 1009 printk(KERN_DEBUG "fr_stop\n"); 1010 #endif 1011 if (hdlc->state.fr.settings.lmi != LMI_NONE) 1012 del_timer_sync(&hdlc->state.fr.timer); 1013 fr_set_link_state(0, dev); 1014 } 1015 1016 1017 1018 static void fr_close(struct net_device *dev) 1019 { 1020 hdlc_device *hdlc = dev_to_hdlc(dev); 1021 pvc_device *pvc = hdlc->state.fr.first_pvc; 1022 1023 while (pvc) { /* Shutdown all PVCs for this FRAD */ 1024 if (pvc->main) 1025 dev_close(pvc->main); 1026 if (pvc->ether) 1027 dev_close(pvc->ether); 1028 pvc = pvc->next; 1029 } 1030 } 1031 1032 static void dlci_setup(struct net_device *dev) 1033 { 1034 dev->type = ARPHRD_DLCI; 1035 dev->flags = IFF_POINTOPOINT; 1036 dev->hard_header_len = 10; 1037 dev->addr_len = 2; 1038 } 1039 1040 static int fr_add_pvc(struct net_device *master, unsigned int dlci, int type) 1041 { 1042 hdlc_device *hdlc = dev_to_hdlc(master); 1043 pvc_device *pvc = NULL; 1044 struct net_device *dev; 1045 int result, used; 1046 char * prefix = "pvc%d"; 1047 1048 if (type == ARPHRD_ETHER) 1049 prefix = "pvceth%d"; 1050 1051 if ((pvc = add_pvc(master, dlci)) == NULL) { 1052 printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n", 1053 master->name); 1054 return -ENOBUFS; 1055 } 1056 1057 if (*get_dev_p(pvc, type)) 1058 return -EEXIST; 1059 1060 used = pvc_is_used(pvc); 1061 1062 if (type == ARPHRD_ETHER) 1063 dev = alloc_netdev(sizeof(struct net_device_stats), 1064 "pvceth%d", ether_setup); 1065 else 1066 dev = alloc_netdev(sizeof(struct net_device_stats), 1067 "pvc%d", dlci_setup); 1068 1069 if (!dev) { 1070 printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", 1071 master->name); 1072 delete_unused_pvcs(hdlc); 1073 return -ENOBUFS; 1074 } 1075 1076 if (type == ARPHRD_ETHER) { 1077 memcpy(dev->dev_addr, "\x00\x01", 2); 1078 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 1079 } else { 1080 *(u16*)dev->dev_addr = htons(dlci); 1081 dlci_to_q922(dev->broadcast, dlci); 1082 } 1083 dev->hard_start_xmit = pvc_xmit; 1084 dev->get_stats = pvc_get_stats; 1085 dev->open = pvc_open; 1086 dev->stop = pvc_close; 1087 dev->do_ioctl = pvc_ioctl; 1088 dev->change_mtu = pvc_change_mtu; 1089 dev->mtu = HDLC_MAX_MTU; 1090 dev->tx_queue_len = 0; 1091 dev->priv = pvc; 1092 1093 result = dev_alloc_name(dev, dev->name); 1094 if (result < 0) { 1095 free_netdev(dev); 1096 delete_unused_pvcs(hdlc); 1097 return result; 1098 } 1099 1100 if (register_netdevice(dev) != 0) { 1101 free_netdev(dev); 1102 delete_unused_pvcs(hdlc); 1103 return -EIO; 1104 } 1105 1106 dev->destructor = free_netdev; 1107 *get_dev_p(pvc, type) = dev; 1108 if (!used) { 1109 hdlc->state.fr.dce_changed = 1; 1110 hdlc->state.fr.dce_pvc_count++; 1111 } 1112 return 0; 1113 } 1114 1115 1116 1117 static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) 1118 { 1119 pvc_device *pvc; 1120 struct net_device *dev; 1121 1122 if ((pvc = find_pvc(hdlc, dlci)) == NULL) 1123 return -ENOENT; 1124 1125 if ((dev = *get_dev_p(pvc, type)) == NULL) 1126 return -ENOENT; 1127 1128 if (dev->flags & IFF_UP) 1129 return -EBUSY; /* PVC in use */ 1130 1131 unregister_netdevice(dev); /* the destructor will free_netdev(dev) */ 1132 *get_dev_p(pvc, type) = NULL; 1133 1134 if (!pvc_is_used(pvc)) { 1135 hdlc->state.fr.dce_pvc_count--; 1136 hdlc->state.fr.dce_changed = 1; 1137 } 1138 delete_unused_pvcs(hdlc); 1139 return 0; 1140 } 1141 1142 1143 1144 static void fr_destroy(hdlc_device *hdlc) 1145 { 1146 pvc_device *pvc; 1147 1148 pvc = hdlc->state.fr.first_pvc; 1149 hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */ 1150 hdlc->state.fr.dce_pvc_count = 0; 1151 hdlc->state.fr.dce_changed = 1; 1152 1153 while (pvc) { 1154 pvc_device *next = pvc->next; 1155 /* destructors will free_netdev() main and ether */ 1156 if (pvc->main) 1157 unregister_netdevice(pvc->main); 1158 1159 if (pvc->ether) 1160 unregister_netdevice(pvc->ether); 1161 1162 kfree(pvc); 1163 pvc = next; 1164 } 1165 } 1166 1167 1168 1169 int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr) 1170 { 1171 fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr; 1172 const size_t size = sizeof(fr_proto); 1173 fr_proto new_settings; 1174 hdlc_device *hdlc = dev_to_hdlc(dev); 1175 fr_proto_pvc pvc; 1176 int result; 1177 1178 switch (ifr->ifr_settings.type) { 1179 case IF_GET_PROTO: 1180 ifr->ifr_settings.type = IF_PROTO_FR; 1181 if (ifr->ifr_settings.size < size) { 1182 ifr->ifr_settings.size = size; /* data size wanted */ 1183 return -ENOBUFS; 1184 } 1185 if (copy_to_user(fr_s, &hdlc->state.fr.settings, size)) 1186 return -EFAULT; 1187 return 0; 1188 1189 case IF_PROTO_FR: 1190 if(!capable(CAP_NET_ADMIN)) 1191 return -EPERM; 1192 1193 if(dev->flags & IFF_UP) 1194 return -EBUSY; 1195 1196 if (copy_from_user(&new_settings, fr_s, size)) 1197 return -EFAULT; 1198 1199 if (new_settings.lmi == LMI_DEFAULT) 1200 new_settings.lmi = LMI_ANSI; 1201 1202 if ((new_settings.lmi != LMI_NONE && 1203 new_settings.lmi != LMI_ANSI && 1204 new_settings.lmi != LMI_CCITT && 1205 new_settings.lmi != LMI_CISCO) || 1206 new_settings.t391 < 1 || 1207 new_settings.t392 < 2 || 1208 new_settings.n391 < 1 || 1209 new_settings.n392 < 1 || 1210 new_settings.n393 < new_settings.n392 || 1211 new_settings.n393 > 32 || 1212 (new_settings.dce != 0 && 1213 new_settings.dce != 1)) 1214 return -EINVAL; 1215 1216 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 1217 if (result) 1218 return result; 1219 1220 if (hdlc->proto.id != IF_PROTO_FR) { 1221 hdlc_proto_detach(hdlc); 1222 hdlc->state.fr.first_pvc = NULL; 1223 hdlc->state.fr.dce_pvc_count = 0; 1224 } 1225 memcpy(&hdlc->state.fr.settings, &new_settings, size); 1226 memset(&hdlc->proto, 0, sizeof(hdlc->proto)); 1227 1228 hdlc->proto.close = fr_close; 1229 hdlc->proto.start = fr_start; 1230 hdlc->proto.stop = fr_stop; 1231 hdlc->proto.detach = fr_destroy; 1232 hdlc->proto.netif_rx = fr_rx; 1233 hdlc->proto.id = IF_PROTO_FR; 1234 dev->hard_start_xmit = hdlc->xmit; 1235 dev->hard_header = NULL; 1236 dev->type = ARPHRD_FRAD; 1237 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1238 dev->addr_len = 0; 1239 return 0; 1240 1241 case IF_PROTO_FR_ADD_PVC: 1242 case IF_PROTO_FR_DEL_PVC: 1243 case IF_PROTO_FR_ADD_ETH_PVC: 1244 case IF_PROTO_FR_DEL_ETH_PVC: 1245 if(!capable(CAP_NET_ADMIN)) 1246 return -EPERM; 1247 1248 if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc, 1249 sizeof(fr_proto_pvc))) 1250 return -EFAULT; 1251 1252 if (pvc.dlci <= 0 || pvc.dlci >= 1024) 1253 return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ 1254 1255 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || 1256 ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) 1257 result = ARPHRD_ETHER; /* bridged Ethernet device */ 1258 else 1259 result = ARPHRD_DLCI; 1260 1261 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || 1262 ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) 1263 return fr_add_pvc(dev, pvc.dlci, result); 1264 else 1265 return fr_del_pvc(hdlc, pvc.dlci, result); 1266 } 1267 1268 return -EINVAL; 1269 } 1270