// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *

	Theory of PVC state

 DCE mode:

 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
	 0,x -> 1,1 if "link reliable" when sending FULL STATUS
	 1,1 -> 1,0 if received FULL STATUS ACK

 (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
	     -> 1 when "PVC up" and (exist,new) = 1,0

 DTE mode:
 (exist,new,active) = FULL STATUS if "link reliable"
		    = 0, 0, 0 if "link unreliable"
 No LMI:
 active = open and "link reliable"
 exist = new = not used

 CCITT LMI: ITU-T Q.933 Annex A
 ANSI LMI: ANSI T1.617 Annex D
 CISCO LMI: the original, aka "Gang of Four" LMI
*/

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09


#define LMI_CCITT_ANSI_DLCI	   0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57

#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		   1	/* report type element length */
#define LMI_INTEG_LEN		   2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14


struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1:	1;
	unsigned cr:	1;
	unsigned dlcih:	6;

	unsigned ea2:	1;
	unsigned de:	1;
	unsigned becn:	1;
	unsigned fecn:	1;
	unsigned dlcil:	4;
#else
	unsigned dlcih:	6;
	unsigned cr:	1;
	unsigned ea1:	1;

	unsigned dlcil:	4;
	unsigned fecn:	1;
	unsigned becn:	1;
	unsigned de:	1;
	unsigned ea2:	1;
#endif
} __packed;


struct pvc_device {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface */
	struct pvc_device *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	} state;
};

struct frad_state {
	fr_proto settings;
	struct pvc_device *first_pvc;
	int dce_pvc_count;

	struct timer_list timer;
	struct net_device *dev;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors;	/* last errors bit list */
	u8 n391cnt;
	u8 txseq;		/* TX sequence number */
	u8 rxseq;		/* RX sequence number */
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);


static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}
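
/* Illustrative example (not part of the original driver, added for
 * clarity): for DLCI 16 the two Q.922 address octets produced by
 * dlci_to_q922() are
 *
 *	hdr[0] = (16 >> 2) & 0xFC = 0x04   (DLCI high bits, C/R = 0, EA = 0)
 *	hdr[1] = ((16 << 4) & 0xF0) | 0x01 = 0x01   (DLCI low bits,
 *						     FECN/BECN/DE = 0, EA = 1)
 *
 * and q922_to_dlci() reverses this:
 * ((0x04 & 0xFC) << 2) | ((0x01 & 0xF0) >> 4) = 16.
 */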


static inline struct frad_state *state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}


static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}


static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}


static inline int pvc_is_used(struct pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}


static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}


static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			struct pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}


static inline struct net_device **get_dev_p(struct pvc_device *pvc,
					    int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}


static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
	if (!skb->dev) { /* Control packets */
		switch (dlci) {
		case LMI_CCITT_ANSI_DLCI:
			skb_push(skb, 4);
			skb->data[3] = NLPID_CCITT_ANSI_LMI;
			break;

		case LMI_CISCO_DLCI:
			skb_push(skb, 4);
			skb->data[3] = NLPID_CISCO_LMI;
			break;

		default:
			return -EINVAL;
		}

	} else if (skb->dev->type == ARPHRD_DLCI) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			skb_push(skb, 4);
			skb->data[3] = NLPID_IP;
			break;

		case htons(ETH_P_IPV6):
			skb_push(skb, 4);
			skb->data[3] = NLPID_IPV6;
			break;

		default:
			skb_push(skb, 10);
			skb->data[3] = FR_PAD;
			skb->data[4] = NLPID_SNAP;
			/* OUI 00-00-00 indicates an Ethertype follows */
			skb->data[5] = 0x00;
			skb->data[6] = 0x00;
			skb->data[7] = 0x00;
			/* This should be an Ethertype: */
			*(__be16 *)(skb->data + 8) = skb->protocol;
		}

	} else if (skb->dev->type == ARPHRD_ETHER) {
		skb_push(skb, 10);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		/* OUI 00-80-C2 stands for the 802.1 organization */
		skb->data[5] = 0x00;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		/* PID 00-07 stands for Ethernet frames without FCS */
		skb->data[8] = 0x00;
		skb->data[9] = 0x07;

	} else {
		return -EINVAL;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}
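
/* Illustrative examples (not part of the original driver): the headers
 * emitted by fr_hard_header() for DLCI 16 look like this on the wire:
 *
 *	IPv4 from a routed (ARPHRD_DLCI) PVC:
 *		04 01 03 CC			addr, addr, UI, NLPID_IP
 *	frame from a bridged Ethernet (ARPHRD_ETHER) PVC:
 *		04 01 03 00 80 00 80 C2 00 07	addr, addr, UI, pad, SNAP,
 *						OUI 00-80-C2, PID 00-07
 *
 * The longest variant is 10 octets, which is why the PVC devices are
 * created with needed_headroom = 10 (see fr_add_pvc() below).
 */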



static int pvc_open(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO; /* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);

		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}


static int pvc_close(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);

		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}


static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct pvc_device *pvc = dev->ml_priv;
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (!pvc->state.active)
		goto drop;

	if (dev->type == ARPHRD_ETHER) {
		int pad = ETH_ZLEN - skb->len;

		if (pad > 0) { /* Pad the frame with zeros */
			if (__skb_pad(skb, pad, false))
				goto out;
			skb_put(skb, pad);
		}
	}

	/* We already requested the header space with dev->needed_headroom.
	 * So this is just a protection in case the upper layer didn't take
	 * dev->needed_headroom into consideration.
	 */
	if (skb_headroom(skb) < 10) {
		struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);

		if (!skb2)
			goto drop;
		dev_kfree_skb(skb);
		skb = skb2;
	}

	skb->dev = dev;
	if (fr_hard_header(skb, pvc->dlci))
		goto drop;

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	if (pvc->state.fecn) /* TX Congestion counter */
		dev->stats.tx_compressed++;
	skb->dev = pvc->frad;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;

drop:
	kfree_skb(skb);
out:
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
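
/* Note (added for clarity): the PVC devices have no queue of their own
 * (IFF_NO_QUEUE); pvc_xmit() prepends the Frame Relay header and hands
 * the skb straight to the FRAD device.  The otherwise unused
 * tx_compressed/rx_compressed counters are reused as congestion counters:
 * tx_compressed counts frames sent while FECN was set on the last
 * received frame, rx_compressed counts frames received with BECN set
 * (see fr_rx() below).
 */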

static inline void fr_log_dlci_active(struct pvc_device *pvc)
{
	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
		    pvc->dlci,
		    pvc->main ? pvc->main->name : "",
		    pvc->main && pvc->ether ? " " : "",
		    pvc->ether ? pvc->ether->name : "",
		    pvc->state.new ? " new" : "",
		    !pvc->state.exist ? "deleted" :
		    pvc->state.active ? "active" : "inactive");
}


static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}


static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		fr_hard_header(skb, LMI_CISCO_DLCI);
	} else {
		fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}
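
/* Illustrative example (not from the original sources): with ANSI LMI in
 * DTE mode, the link-integrity-only status enquiry built above is 14
 * octets (LMI_ANSI_LENGTH):
 *
 *	00 01 03 08	Q.922 address for DLCI 0, UI, NLPID 0x08
 *	00 75 95	call reference, STATUS ENQUIRY, locking shift
 *	01 01 01	report type IE, length 1, "link integrity only"
 *	03 02 TX RX	keep-alive IE, length 2, TX and RX sequence numbers
 *
 * A DCE full status report additionally carries one PVC status IE per
 * configured DLCI; for an active DLCI 16 the ANSI encoding above yields
 * 07 03 01 80 82.
 */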



static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}


static void fr_timer(struct timer_list *t)
{
	struct frad_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				netdev_info(dev, "No LMI status reply received\n");
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	add_timer(&state(hdlc)->timer);
}


static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		netdev_info(dev, "Short LMI frame\n");
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
			    skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
			    skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
				    skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
			    reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++];	/* TX sequence from peer */
	rxseq = skb->data[i++];			/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) {	/* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
			/* Stop sending full report -
			 * the last one has been confirmed by DTE
			 */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

					/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
				    skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
				    skb->data[i]);
			return 1;
		}
		i++;

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}
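
/* Note (added for clarity; the values below are the usual Annex A/D
 * defaults, not anything set by this driver): a DTE polls every t391
 * seconds and asks for a full status report on every n391st poll; the
 * link is declared unreliable when at least n392 of the last n393 polls
 * failed.  A DCE expects an enquiry at least every t392 seconds.  With
 * the common defaults t391=10, t392=15, n391=6, n392=3, n393=4 this
 * means a poll every 10 s, a full report roughly once a minute, and loss
 * of the link after three missed replies out of four.
 */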

static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc)
{
	/* OUI 00-00-00 indicates an Ethertype follows */
	if (skb->data[0] == 0x00 &&
	    skb->data[1] == 0x00 &&
	    skb->data[2] == 0x00) {
		if (!pvc->main)
			return -1;
		skb->dev = pvc->main;
		skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */
		skb_pull(skb, 5);
		skb_reset_mac_header(skb);
		return 0;

	/* OUI 00-80-C2 stands for the 802.1 organization */
	} else if (skb->data[0] == 0x00 &&
		   skb->data[1] == 0x80 &&
		   skb->data[2] == 0xC2) {
		/* PID 00-07 stands for Ethernet frames without FCS */
		if (skb->data[3] == 0x00 &&
		    skb->data[4] == 0x07) {
			if (!pvc->ether)
				return -1;
			skb_pull(skb, 5);
			if (skb->len < ETH_HLEN)
				return -1;
			skb->protocol = eth_type_trans(skb, pvc->ether);
			return 0;

		/* PID unsupported */
		} else {
			return -1;
		}

	/* OUI unsupported */
	} else {
		return -1;
	}
}

static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	struct pvc_device *pvc;
	struct net_device *dev;

	if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
			    dlci);
#endif
		goto rx_drop;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}


	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		frad->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		if (!pvc->main)
			goto rx_drop;
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		skb->dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);
		skb_reset_mac_header(skb);

	} else if (data[3] == NLPID_IPV6) {
		if (!pvc->main)
			goto rx_drop;
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		skb->dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);
		skb_reset_mac_header(skb);

	} else if (data[3] == FR_PAD) {
		if (skb->len < 5)
			goto rx_error;
		if (data[4] == NLPID_SNAP) { /* A SNAP header follows */
			skb_pull(skb, 5);
			if (skb->len < 5) /* Incomplete SNAP header */
				goto rx_error;
			if (fr_snap_parse(skb, pvc))
				goto rx_drop;
		} else {
			goto rx_drop;
		}

	} else {
		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
			    data[3], skb->len);
		goto rx_drop;
	}

	dev = skb->dev;
	dev->stats.rx_packets++; /* PVC traffic */
	dev->stats.rx_bytes += skb->len;
	if (pvc->state.becn)
		dev->stats.rx_compressed++;
	netif_rx(skb);
	return NET_RX_SUCCESS;

rx_error:
	frad->stats.rx_errors++; /* Mark error */
rx_drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}



static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		state(hdlc)->dev = dev;
		timer_setup(&state(hdlc)->timer, fr_timer, 0);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}


static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}


static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}


static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 0;
	dev->addr_len = 2;
	netif_keep_dst(dev);
}

static const struct net_device_ops pvc_ops = {
	.ndo_open	= pvc_open,
	.ndo_stop	= pvc_close,
	.ndo_start_xmit	= pvc_xmit,
	.ndo_do_ioctl	= pvc_ioctl,
};

static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc;
	struct net_device *dev;
	int used;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
				   ether_setup);
	else
		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

	if (!dev) {
		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		eth_hw_addr_random(dev);
	} else {
		*(__be16 *)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->netdev_ops = &pvc_ops;
	dev->mtu = HDLC_MAX_MTU;
	dev->min_mtu = 68;
	dev->max_mtu = HDLC_MAX_MTU;
	dev->needed_headroom = 10;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->ml_priv = pvc;

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->needs_free_netdev = true;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}


static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	struct pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}


static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		struct pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}


static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.netif_rx	= fr_rx,
	.module		= THIS_MODULE,
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);
		dev->type = ARPHRD_FRAD;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}
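
/* Usage sketch (added for clarity, not part of the original sources):
 * the ioctls above are normally driven through SIOCWANDEV, e.g. by the
 * sethdlc utility described in Documentation/networking/generic-hdlc;
 * the exact command syntax below follows that documentation and may
 * differ between versions:
 *
 *	sethdlc hdlc0 fr lmi ansi	# IF_PROTO_FR, ANSI LMI, DTE mode
 *	sethdlc hdlc0 create 99		# IF_PROTO_FR_ADD_PVC, DLCI 99
 *	ifconfig hdlc0 up
 *	ifconfig pvc0 ...		# first routed PVC device is pvc0
 *
 * Bridged Ethernet PVCs (IF_PROTO_FR_ADD_ETH_PVC) appear as pvceth<n>
 * instead.
 */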


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}


static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");