1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Generic HDLC support routines for Linux 4 * Frame Relay support 5 * 6 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl> 7 * 8 9 Theory of PVC state 10 11 DCE mode: 12 13 (exist,new) -> 0,0 when "PVC create" or if "link unreliable" 14 0,x -> 1,1 if "link reliable" when sending FULL STATUS 15 1,1 -> 1,0 if received FULL STATUS ACK 16 17 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" 18 -> 1 when "PVC up" and (exist,new) = 1,0 19 20 DTE mode: 21 (exist,new,active) = FULL STATUS if "link reliable" 22 = 0, 0, 0 if "link unreliable" 23 No LMI: 24 active = open and "link reliable" 25 exist = new = not used 26 27 CCITT LMI: ITU-T Q.933 Annex A 28 ANSI LMI: ANSI T1.617 Annex D 29 CISCO LMI: the original, aka "Gang of Four" LMI 30 31 */ 32 33 #include <linux/errno.h> 34 #include <linux/etherdevice.h> 35 #include <linux/hdlc.h> 36 #include <linux/if_arp.h> 37 #include <linux/inetdevice.h> 38 #include <linux/init.h> 39 #include <linux/kernel.h> 40 #include <linux/module.h> 41 #include <linux/pkt_sched.h> 42 #include <linux/poll.h> 43 #include <linux/rtnetlink.h> 44 #include <linux/skbuff.h> 45 #include <linux/slab.h> 46 47 #undef DEBUG_PKT 48 #undef DEBUG_ECN 49 #undef DEBUG_LINK 50 #undef DEBUG_PROTO 51 #undef DEBUG_PVC 52 53 #define FR_UI 0x03 54 #define FR_PAD 0x00 55 56 #define NLPID_IP 0xCC 57 #define NLPID_IPV6 0x8E 58 #define NLPID_SNAP 0x80 59 #define NLPID_PAD 0x00 60 #define NLPID_CCITT_ANSI_LMI 0x08 61 #define NLPID_CISCO_LMI 0x09 62 63 64 #define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */ 65 #define LMI_CISCO_DLCI 1023 66 67 #define LMI_CALLREF 0x00 /* Call Reference */ 68 #define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */ 69 #define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */ 70 #define LMI_CCITT_REPTYPE 0x51 71 #define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */ 72 #define LMI_CCITT_ALIVE 0x53 73 #define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */ 74 #define LMI_CCITT_PVCSTAT 0x57 75 76 #define LMI_FULLREP 0x00 /* full report */ 77 #define LMI_INTEGRITY 0x01 /* link integrity report */ 78 #define LMI_SINGLE 0x02 /* single PVC report */ 79 80 #define LMI_STATUS_ENQUIRY 0x75 81 #define LMI_STATUS 0x7D /* reply */ 82 83 #define LMI_REPT_LEN 1 /* report type element length */ 84 #define LMI_INTEG_LEN 2 /* link integrity element length */ 85 86 #define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */ 87 #define LMI_ANSI_LENGTH 14 88 89 90 struct fr_hdr { 91 #if defined(__LITTLE_ENDIAN_BITFIELD) 92 unsigned ea1: 1; 93 unsigned cr: 1; 94 unsigned dlcih: 6; 95 96 unsigned ea2: 1; 97 unsigned de: 1; 98 unsigned becn: 1; 99 unsigned fecn: 1; 100 unsigned dlcil: 4; 101 #else 102 unsigned dlcih: 6; 103 unsigned cr: 1; 104 unsigned ea1: 1; 105 106 unsigned dlcil: 4; 107 unsigned fecn: 1; 108 unsigned becn: 1; 109 unsigned de: 1; 110 unsigned ea2: 1; 111 #endif 112 } __packed; 113 114 115 struct pvc_device { 116 struct net_device *frad; 117 struct net_device *main; 118 struct net_device *ether; /* bridged Ethernet interface */ 119 struct pvc_device *next; /* Sorted in ascending DLCI order */ 120 int dlci; 121 int open_count; 122 123 struct { 124 unsigned int new: 1; 125 unsigned int active: 1; 126 unsigned int exist: 1; 127 unsigned int deleted: 1; 128 unsigned int fecn: 1; 129 unsigned int becn: 1; 130 unsigned int bandwidth; /* Cisco LMI reporting only */ 131 }state; 132 }; 133 134 struct frad_state { 135 fr_proto settings; 136 struct pvc_device *first_pvc; 137 int dce_pvc_count; 138 139 struct timer_list timer; 140 struct net_device *dev; 141 unsigned long last_poll; 142 int reliable; 143 int dce_changed; 144 int request; 145 int fullrep_sent; 146 u32 last_errors; /* last errors bit list */ 147 u8 n391cnt; 148 u8 txseq; /* TX sequence number */ 149 u8 rxseq; /* RX sequence number */ 150 }; 151 152 153 static int fr_ioctl(struct net_device *dev, struct ifreq *ifr); 154 155 156 static inline u16 q922_to_dlci(u8 *hdr) 157 { 158 return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); 159 } 160 161 162 static inline void dlci_to_q922(u8 *hdr, u16 dlci) 163 { 164 hdr[0] = (dlci >> 2) & 0xFC; 165 hdr[1] = ((dlci << 4) & 0xF0) | 0x01; 166 } 167 168 169 static inline struct frad_state* state(hdlc_device *hdlc) 170 { 171 return(struct frad_state *)(hdlc->state); 172 } 173 174 175 static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci) 176 { 177 struct pvc_device *pvc = state(hdlc)->first_pvc; 178 179 while (pvc) { 180 if (pvc->dlci == dlci) 181 return pvc; 182 if (pvc->dlci > dlci) 183 return NULL; /* the list is sorted */ 184 pvc = pvc->next; 185 } 186 187 return NULL; 188 } 189 190 191 static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci) 192 { 193 hdlc_device *hdlc = dev_to_hdlc(dev); 194 struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc; 195 196 while (*pvc_p) { 197 if ((*pvc_p)->dlci == dlci) 198 return *pvc_p; 199 if ((*pvc_p)->dlci > dlci) 200 break; /* the list is sorted */ 201 pvc_p = &(*pvc_p)->next; 202 } 203 204 pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC); 205 #ifdef DEBUG_PVC 206 printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev); 207 #endif 208 if (!pvc) 209 return NULL; 210 211 pvc->dlci = dlci; 212 pvc->frad = dev; 213 pvc->next = *pvc_p; /* Put it in the chain */ 214 *pvc_p = pvc; 215 return pvc; 216 } 217 218 219 static inline int pvc_is_used(struct pvc_device *pvc) 220 { 221 return pvc->main || pvc->ether; 222 } 223 224 225 static inline void pvc_carrier(int on, struct pvc_device *pvc) 226 { 227 if (on) { 228 if (pvc->main) 229 if (!netif_carrier_ok(pvc->main)) 230 netif_carrier_on(pvc->main); 231 if (pvc->ether) 232 if (!netif_carrier_ok(pvc->ether)) 233 netif_carrier_on(pvc->ether); 234 } else { 235 if (pvc->main) 236 if (netif_carrier_ok(pvc->main)) 237 netif_carrier_off(pvc->main); 238 if (pvc->ether) 239 if (netif_carrier_ok(pvc->ether)) 240 netif_carrier_off(pvc->ether); 241 } 242 } 243 244 245 static inline void delete_unused_pvcs(hdlc_device *hdlc) 246 { 247 struct pvc_device **pvc_p = &state(hdlc)->first_pvc; 248 249 while (*pvc_p) { 250 if (!pvc_is_used(*pvc_p)) { 251 struct pvc_device *pvc = *pvc_p; 252 #ifdef DEBUG_PVC 253 printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc); 254 #endif 255 *pvc_p = pvc->next; 256 kfree(pvc); 257 continue; 258 } 259 pvc_p = &(*pvc_p)->next; 260 } 261 } 262 263 264 static inline struct net_device **get_dev_p(struct pvc_device *pvc, 265 int type) 266 { 267 if (type == ARPHRD_ETHER) 268 return &pvc->ether; 269 else 270 return &pvc->main; 271 } 272 273 274 static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) 275 { 276 u16 head_len; 277 struct sk_buff *skb = *skb_p; 278 279 switch (skb->protocol) { 280 case cpu_to_be16(NLPID_CCITT_ANSI_LMI): 281 head_len = 4; 282 skb_push(skb, head_len); 283 skb->data[3] = NLPID_CCITT_ANSI_LMI; 284 break; 285 286 case cpu_to_be16(NLPID_CISCO_LMI): 287 head_len = 4; 288 skb_push(skb, head_len); 289 skb->data[3] = NLPID_CISCO_LMI; 290 break; 291 292 case cpu_to_be16(ETH_P_IP): 293 head_len = 4; 294 skb_push(skb, head_len); 295 skb->data[3] = NLPID_IP; 296 break; 297 298 case cpu_to_be16(ETH_P_IPV6): 299 head_len = 4; 300 skb_push(skb, head_len); 301 skb->data[3] = NLPID_IPV6; 302 break; 303 304 case cpu_to_be16(ETH_P_802_3): 305 head_len = 10; 306 if (skb_headroom(skb) < head_len) { 307 struct sk_buff *skb2 = skb_realloc_headroom(skb, 308 head_len); 309 if (!skb2) 310 return -ENOBUFS; 311 dev_kfree_skb(skb); 312 skb = *skb_p = skb2; 313 } 314 skb_push(skb, head_len); 315 skb->data[3] = FR_PAD; 316 skb->data[4] = NLPID_SNAP; 317 skb->data[5] = FR_PAD; 318 skb->data[6] = 0x80; 319 skb->data[7] = 0xC2; 320 skb->data[8] = 0x00; 321 skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ 322 break; 323 324 default: 325 head_len = 10; 326 skb_push(skb, head_len); 327 skb->data[3] = FR_PAD; 328 skb->data[4] = NLPID_SNAP; 329 skb->data[5] = FR_PAD; 330 skb->data[6] = FR_PAD; 331 skb->data[7] = FR_PAD; 332 *(__be16*)(skb->data + 8) = skb->protocol; 333 } 334 335 dlci_to_q922(skb->data, dlci); 336 skb->data[2] = FR_UI; 337 return 0; 338 } 339 340 341 342 static int pvc_open(struct net_device *dev) 343 { 344 struct pvc_device *pvc = dev->ml_priv; 345 346 if ((pvc->frad->flags & IFF_UP) == 0) 347 return -EIO; /* Frad must be UP in order to activate PVC */ 348 349 if (pvc->open_count++ == 0) { 350 hdlc_device *hdlc = dev_to_hdlc(pvc->frad); 351 if (state(hdlc)->settings.lmi == LMI_NONE) 352 pvc->state.active = netif_carrier_ok(pvc->frad); 353 354 pvc_carrier(pvc->state.active, pvc); 355 state(hdlc)->dce_changed = 1; 356 } 357 return 0; 358 } 359 360 361 362 static int pvc_close(struct net_device *dev) 363 { 364 struct pvc_device *pvc = dev->ml_priv; 365 366 if (--pvc->open_count == 0) { 367 hdlc_device *hdlc = dev_to_hdlc(pvc->frad); 368 if (state(hdlc)->settings.lmi == LMI_NONE) 369 pvc->state.active = 0; 370 371 if (state(hdlc)->settings.dce) { 372 state(hdlc)->dce_changed = 1; 373 pvc->state.active = 0; 374 } 375 } 376 return 0; 377 } 378 379 380 381 static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 382 { 383 struct pvc_device *pvc = dev->ml_priv; 384 fr_proto_pvc_info info; 385 386 if (ifr->ifr_settings.type == IF_GET_PROTO) { 387 if (dev->type == ARPHRD_ETHER) 388 ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC; 389 else 390 ifr->ifr_settings.type = IF_PROTO_FR_PVC; 391 392 if (ifr->ifr_settings.size < sizeof(info)) { 393 /* data size wanted */ 394 ifr->ifr_settings.size = sizeof(info); 395 return -ENOBUFS; 396 } 397 398 info.dlci = pvc->dlci; 399 memcpy(info.master, pvc->frad->name, IFNAMSIZ); 400 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, 401 &info, sizeof(info))) 402 return -EFAULT; 403 return 0; 404 } 405 406 return -EINVAL; 407 } 408 409 static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) 410 { 411 struct pvc_device *pvc = dev->ml_priv; 412 413 if (pvc->state.active) { 414 if (dev->type == ARPHRD_ETHER) { 415 int pad = ETH_ZLEN - skb->len; 416 if (pad > 0) { /* Pad the frame with zeros */ 417 int len = skb->len; 418 if (skb_tailroom(skb) < pad) 419 if (pskb_expand_head(skb, 0, pad, 420 GFP_ATOMIC)) { 421 dev->stats.tx_dropped++; 422 dev_kfree_skb(skb); 423 return NETDEV_TX_OK; 424 } 425 skb_put(skb, pad); 426 memset(skb->data + len, 0, pad); 427 } 428 skb->protocol = cpu_to_be16(ETH_P_802_3); 429 } 430 if (!fr_hard_header(&skb, pvc->dlci)) { 431 dev->stats.tx_bytes += skb->len; 432 dev->stats.tx_packets++; 433 if (pvc->state.fecn) /* TX Congestion counter */ 434 dev->stats.tx_compressed++; 435 skb->dev = pvc->frad; 436 dev_queue_xmit(skb); 437 return NETDEV_TX_OK; 438 } 439 } 440 441 dev->stats.tx_dropped++; 442 dev_kfree_skb(skb); 443 return NETDEV_TX_OK; 444 } 445 446 static inline void fr_log_dlci_active(struct pvc_device *pvc) 447 { 448 netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n", 449 pvc->dlci, 450 pvc->main ? pvc->main->name : "", 451 pvc->main && pvc->ether ? " " : "", 452 pvc->ether ? pvc->ether->name : "", 453 pvc->state.new ? " new" : "", 454 !pvc->state.exist ? "deleted" : 455 pvc->state.active ? "active" : "inactive"); 456 } 457 458 459 460 static inline u8 fr_lmi_nextseq(u8 x) 461 { 462 x++; 463 return x ? x : 1; 464 } 465 466 467 static void fr_lmi_send(struct net_device *dev, int fullrep) 468 { 469 hdlc_device *hdlc = dev_to_hdlc(dev); 470 struct sk_buff *skb; 471 struct pvc_device *pvc = state(hdlc)->first_pvc; 472 int lmi = state(hdlc)->settings.lmi; 473 int dce = state(hdlc)->settings.dce; 474 int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH; 475 int stat_len = (lmi == LMI_CISCO) ? 6 : 3; 476 u8 *data; 477 int i = 0; 478 479 if (dce && fullrep) { 480 len += state(hdlc)->dce_pvc_count * (2 + stat_len); 481 if (len > HDLC_MAX_MRU) { 482 netdev_warn(dev, "Too many PVCs while sending LMI full report\n"); 483 return; 484 } 485 } 486 487 skb = dev_alloc_skb(len); 488 if (!skb) { 489 netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n"); 490 return; 491 } 492 memset(skb->data, 0, len); 493 skb_reserve(skb, 4); 494 if (lmi == LMI_CISCO) { 495 skb->protocol = cpu_to_be16(NLPID_CISCO_LMI); 496 fr_hard_header(&skb, LMI_CISCO_DLCI); 497 } else { 498 skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI); 499 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); 500 } 501 data = skb_tail_pointer(skb); 502 data[i++] = LMI_CALLREF; 503 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; 504 if (lmi == LMI_ANSI) 505 data[i++] = LMI_ANSI_LOCKSHIFT; 506 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE : 507 LMI_ANSI_CISCO_REPTYPE; 508 data[i++] = LMI_REPT_LEN; 509 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY; 510 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE; 511 data[i++] = LMI_INTEG_LEN; 512 data[i++] = state(hdlc)->txseq = 513 fr_lmi_nextseq(state(hdlc)->txseq); 514 data[i++] = state(hdlc)->rxseq; 515 516 if (dce && fullrep) { 517 while (pvc) { 518 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT : 519 LMI_ANSI_CISCO_PVCSTAT; 520 data[i++] = stat_len; 521 522 /* LMI start/restart */ 523 if (state(hdlc)->reliable && !pvc->state.exist) { 524 pvc->state.exist = pvc->state.new = 1; 525 fr_log_dlci_active(pvc); 526 } 527 528 /* ifconfig PVC up */ 529 if (pvc->open_count && !pvc->state.active && 530 pvc->state.exist && !pvc->state.new) { 531 pvc_carrier(1, pvc); 532 pvc->state.active = 1; 533 fr_log_dlci_active(pvc); 534 } 535 536 if (lmi == LMI_CISCO) { 537 data[i] = pvc->dlci >> 8; 538 data[i + 1] = pvc->dlci & 0xFF; 539 } else { 540 data[i] = (pvc->dlci >> 4) & 0x3F; 541 data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80; 542 data[i + 2] = 0x80; 543 } 544 545 if (pvc->state.new) 546 data[i + 2] |= 0x08; 547 else if (pvc->state.active) 548 data[i + 2] |= 0x02; 549 550 i += stat_len; 551 pvc = pvc->next; 552 } 553 } 554 555 skb_put(skb, i); 556 skb->priority = TC_PRIO_CONTROL; 557 skb->dev = dev; 558 skb_reset_network_header(skb); 559 560 dev_queue_xmit(skb); 561 } 562 563 564 565 static void fr_set_link_state(int reliable, struct net_device *dev) 566 { 567 hdlc_device *hdlc = dev_to_hdlc(dev); 568 struct pvc_device *pvc = state(hdlc)->first_pvc; 569 570 state(hdlc)->reliable = reliable; 571 if (reliable) { 572 netif_dormant_off(dev); 573 state(hdlc)->n391cnt = 0; /* Request full status */ 574 state(hdlc)->dce_changed = 1; 575 576 if (state(hdlc)->settings.lmi == LMI_NONE) { 577 while (pvc) { /* Activate all PVCs */ 578 pvc_carrier(1, pvc); 579 pvc->state.exist = pvc->state.active = 1; 580 pvc->state.new = 0; 581 pvc = pvc->next; 582 } 583 } 584 } else { 585 netif_dormant_on(dev); 586 while (pvc) { /* Deactivate all PVCs */ 587 pvc_carrier(0, pvc); 588 pvc->state.exist = pvc->state.active = 0; 589 pvc->state.new = 0; 590 if (!state(hdlc)->settings.dce) 591 pvc->state.bandwidth = 0; 592 pvc = pvc->next; 593 } 594 } 595 } 596 597 598 static void fr_timer(struct timer_list *t) 599 { 600 struct frad_state *st = from_timer(st, t, timer); 601 struct net_device *dev = st->dev; 602 hdlc_device *hdlc = dev_to_hdlc(dev); 603 int i, cnt = 0, reliable; 604 u32 list; 605 606 if (state(hdlc)->settings.dce) { 607 reliable = state(hdlc)->request && 608 time_before(jiffies, state(hdlc)->last_poll + 609 state(hdlc)->settings.t392 * HZ); 610 state(hdlc)->request = 0; 611 } else { 612 state(hdlc)->last_errors <<= 1; /* Shift the list */ 613 if (state(hdlc)->request) { 614 if (state(hdlc)->reliable) 615 netdev_info(dev, "No LMI status reply received\n"); 616 state(hdlc)->last_errors |= 1; 617 } 618 619 list = state(hdlc)->last_errors; 620 for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1) 621 cnt += (list & 1); /* errors count */ 622 623 reliable = (cnt < state(hdlc)->settings.n392); 624 } 625 626 if (state(hdlc)->reliable != reliable) { 627 netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un"); 628 fr_set_link_state(reliable, dev); 629 } 630 631 if (state(hdlc)->settings.dce) 632 state(hdlc)->timer.expires = jiffies + 633 state(hdlc)->settings.t392 * HZ; 634 else { 635 if (state(hdlc)->n391cnt) 636 state(hdlc)->n391cnt--; 637 638 fr_lmi_send(dev, state(hdlc)->n391cnt == 0); 639 640 state(hdlc)->last_poll = jiffies; 641 state(hdlc)->request = 1; 642 state(hdlc)->timer.expires = jiffies + 643 state(hdlc)->settings.t391 * HZ; 644 } 645 646 add_timer(&state(hdlc)->timer); 647 } 648 649 650 static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) 651 { 652 hdlc_device *hdlc = dev_to_hdlc(dev); 653 struct pvc_device *pvc; 654 u8 rxseq, txseq; 655 int lmi = state(hdlc)->settings.lmi; 656 int dce = state(hdlc)->settings.dce; 657 int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i; 658 659 if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH : 660 LMI_CCITT_CISCO_LENGTH)) { 661 netdev_info(dev, "Short LMI frame\n"); 662 return 1; 663 } 664 665 if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI : 666 NLPID_CCITT_ANSI_LMI)) { 667 netdev_info(dev, "Received non-LMI frame with LMI DLCI\n"); 668 return 1; 669 } 670 671 if (skb->data[4] != LMI_CALLREF) { 672 netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n", 673 skb->data[4]); 674 return 1; 675 } 676 677 if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) { 678 netdev_info(dev, "Invalid LMI Message type (0x%02X)\n", 679 skb->data[5]); 680 return 1; 681 } 682 683 if (lmi == LMI_ANSI) { 684 if (skb->data[6] != LMI_ANSI_LOCKSHIFT) { 685 netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n", 686 skb->data[6]); 687 return 1; 688 } 689 i = 7; 690 } else 691 i = 6; 692 693 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE : 694 LMI_ANSI_CISCO_REPTYPE)) { 695 netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n", 696 skb->data[i]); 697 return 1; 698 } 699 700 if (skb->data[++i] != LMI_REPT_LEN) { 701 netdev_info(dev, "Invalid LMI Report type IE length (%u)\n", 702 skb->data[i]); 703 return 1; 704 } 705 706 reptype = skb->data[++i]; 707 if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) { 708 netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n", 709 reptype); 710 return 1; 711 } 712 713 if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE : 714 LMI_ANSI_CISCO_ALIVE)) { 715 netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n", 716 skb->data[i]); 717 return 1; 718 } 719 720 if (skb->data[++i] != LMI_INTEG_LEN) { 721 netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n", 722 skb->data[i]); 723 return 1; 724 } 725 i++; 726 727 state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */ 728 rxseq = skb->data[i++]; /* Should confirm our sequence */ 729 730 txseq = state(hdlc)->txseq; 731 732 if (dce) 733 state(hdlc)->last_poll = jiffies; 734 735 error = 0; 736 if (!state(hdlc)->reliable) 737 error = 1; 738 739 if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */ 740 state(hdlc)->n391cnt = 0; 741 error = 1; 742 } 743 744 if (dce) { 745 if (state(hdlc)->fullrep_sent && !error) { 746 /* Stop sending full report - the last one has been confirmed by DTE */ 747 state(hdlc)->fullrep_sent = 0; 748 pvc = state(hdlc)->first_pvc; 749 while (pvc) { 750 if (pvc->state.new) { 751 pvc->state.new = 0; 752 753 /* Tell DTE that new PVC is now active */ 754 state(hdlc)->dce_changed = 1; 755 } 756 pvc = pvc->next; 757 } 758 } 759 760 if (state(hdlc)->dce_changed) { 761 reptype = LMI_FULLREP; 762 state(hdlc)->fullrep_sent = 1; 763 state(hdlc)->dce_changed = 0; 764 } 765 766 state(hdlc)->request = 1; /* got request */ 767 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0); 768 return 0; 769 } 770 771 /* DTE */ 772 773 state(hdlc)->request = 0; /* got response, no request pending */ 774 775 if (error) 776 return 0; 777 778 if (reptype != LMI_FULLREP) 779 return 0; 780 781 pvc = state(hdlc)->first_pvc; 782 783 while (pvc) { 784 pvc->state.deleted = 1; 785 pvc = pvc->next; 786 } 787 788 no_ram = 0; 789 while (skb->len >= i + 2 + stat_len) { 790 u16 dlci; 791 u32 bw; 792 unsigned int active, new; 793 794 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT : 795 LMI_ANSI_CISCO_PVCSTAT)) { 796 netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n", 797 skb->data[i]); 798 return 1; 799 } 800 801 if (skb->data[++i] != stat_len) { 802 netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n", 803 skb->data[i]); 804 return 1; 805 } 806 i++; 807 808 new = !! (skb->data[i + 2] & 0x08); 809 active = !! (skb->data[i + 2] & 0x02); 810 if (lmi == LMI_CISCO) { 811 dlci = (skb->data[i] << 8) | skb->data[i + 1]; 812 bw = (skb->data[i + 3] << 16) | 813 (skb->data[i + 4] << 8) | 814 (skb->data[i + 5]); 815 } else { 816 dlci = ((skb->data[i] & 0x3F) << 4) | 817 ((skb->data[i + 1] & 0x78) >> 3); 818 bw = 0; 819 } 820 821 pvc = add_pvc(dev, dlci); 822 823 if (!pvc && !no_ram) { 824 netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n"); 825 no_ram = 1; 826 } 827 828 if (pvc) { 829 pvc->state.exist = 1; 830 pvc->state.deleted = 0; 831 if (active != pvc->state.active || 832 new != pvc->state.new || 833 bw != pvc->state.bandwidth || 834 !pvc->state.exist) { 835 pvc->state.new = new; 836 pvc->state.active = active; 837 pvc->state.bandwidth = bw; 838 pvc_carrier(active, pvc); 839 fr_log_dlci_active(pvc); 840 } 841 } 842 843 i += stat_len; 844 } 845 846 pvc = state(hdlc)->first_pvc; 847 848 while (pvc) { 849 if (pvc->state.deleted && pvc->state.exist) { 850 pvc_carrier(0, pvc); 851 pvc->state.active = pvc->state.new = 0; 852 pvc->state.exist = 0; 853 pvc->state.bandwidth = 0; 854 fr_log_dlci_active(pvc); 855 } 856 pvc = pvc->next; 857 } 858 859 /* Next full report after N391 polls */ 860 state(hdlc)->n391cnt = state(hdlc)->settings.n391; 861 862 return 0; 863 } 864 865 866 static int fr_rx(struct sk_buff *skb) 867 { 868 struct net_device *frad = skb->dev; 869 hdlc_device *hdlc = dev_to_hdlc(frad); 870 struct fr_hdr *fh = (struct fr_hdr *)skb->data; 871 u8 *data = skb->data; 872 u16 dlci; 873 struct pvc_device *pvc; 874 struct net_device *dev = NULL; 875 876 if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) 877 goto rx_error; 878 879 dlci = q922_to_dlci(skb->data); 880 881 if ((dlci == LMI_CCITT_ANSI_DLCI && 882 (state(hdlc)->settings.lmi == LMI_ANSI || 883 state(hdlc)->settings.lmi == LMI_CCITT)) || 884 (dlci == LMI_CISCO_DLCI && 885 state(hdlc)->settings.lmi == LMI_CISCO)) { 886 if (fr_lmi_recv(frad, skb)) 887 goto rx_error; 888 dev_kfree_skb_any(skb); 889 return NET_RX_SUCCESS; 890 } 891 892 pvc = find_pvc(hdlc, dlci); 893 if (!pvc) { 894 #ifdef DEBUG_PKT 895 netdev_info(frad, "No PVC for received frame's DLCI %d\n", 896 dlci); 897 #endif 898 dev_kfree_skb_any(skb); 899 return NET_RX_DROP; 900 } 901 902 if (pvc->state.fecn != fh->fecn) { 903 #ifdef DEBUG_ECN 904 printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name, 905 dlci, fh->fecn ? "N" : "FF"); 906 #endif 907 pvc->state.fecn ^= 1; 908 } 909 910 if (pvc->state.becn != fh->becn) { 911 #ifdef DEBUG_ECN 912 printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name, 913 dlci, fh->becn ? "N" : "FF"); 914 #endif 915 pvc->state.becn ^= 1; 916 } 917 918 919 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 920 frad->stats.rx_dropped++; 921 return NET_RX_DROP; 922 } 923 924 if (data[3] == NLPID_IP) { 925 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ 926 dev = pvc->main; 927 skb->protocol = htons(ETH_P_IP); 928 929 } else if (data[3] == NLPID_IPV6) { 930 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ 931 dev = pvc->main; 932 skb->protocol = htons(ETH_P_IPV6); 933 934 } else if (skb->len > 10 && data[3] == FR_PAD && 935 data[4] == NLPID_SNAP && data[5] == FR_PAD) { 936 u16 oui = ntohs(*(__be16*)(data + 6)); 937 u16 pid = ntohs(*(__be16*)(data + 8)); 938 skb_pull(skb, 10); 939 940 switch ((((u32)oui) << 16) | pid) { 941 case ETH_P_ARP: /* routed frame with SNAP */ 942 case ETH_P_IPX: 943 case ETH_P_IP: /* a long variant */ 944 case ETH_P_IPV6: 945 dev = pvc->main; 946 skb->protocol = htons(pid); 947 break; 948 949 case 0x80C20007: /* bridged Ethernet frame */ 950 if ((dev = pvc->ether) != NULL) 951 skb->protocol = eth_type_trans(skb, dev); 952 break; 953 954 default: 955 netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n", 956 oui, pid); 957 dev_kfree_skb_any(skb); 958 return NET_RX_DROP; 959 } 960 } else { 961 netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n", 962 data[3], skb->len); 963 dev_kfree_skb_any(skb); 964 return NET_RX_DROP; 965 } 966 967 if (dev) { 968 dev->stats.rx_packets++; /* PVC traffic */ 969 dev->stats.rx_bytes += skb->len; 970 if (pvc->state.becn) 971 dev->stats.rx_compressed++; 972 skb->dev = dev; 973 netif_rx(skb); 974 return NET_RX_SUCCESS; 975 } else { 976 dev_kfree_skb_any(skb); 977 return NET_RX_DROP; 978 } 979 980 rx_error: 981 frad->stats.rx_errors++; /* Mark error */ 982 dev_kfree_skb_any(skb); 983 return NET_RX_DROP; 984 } 985 986 987 988 static void fr_start(struct net_device *dev) 989 { 990 hdlc_device *hdlc = dev_to_hdlc(dev); 991 #ifdef DEBUG_LINK 992 printk(KERN_DEBUG "fr_start\n"); 993 #endif 994 if (state(hdlc)->settings.lmi != LMI_NONE) { 995 state(hdlc)->reliable = 0; 996 state(hdlc)->dce_changed = 1; 997 state(hdlc)->request = 0; 998 state(hdlc)->fullrep_sent = 0; 999 state(hdlc)->last_errors = 0xFFFFFFFF; 1000 state(hdlc)->n391cnt = 0; 1001 state(hdlc)->txseq = state(hdlc)->rxseq = 0; 1002 1003 state(hdlc)->dev = dev; 1004 timer_setup(&state(hdlc)->timer, fr_timer, 0); 1005 /* First poll after 1 s */ 1006 state(hdlc)->timer.expires = jiffies + HZ; 1007 add_timer(&state(hdlc)->timer); 1008 } else 1009 fr_set_link_state(1, dev); 1010 } 1011 1012 1013 static void fr_stop(struct net_device *dev) 1014 { 1015 hdlc_device *hdlc = dev_to_hdlc(dev); 1016 #ifdef DEBUG_LINK 1017 printk(KERN_DEBUG "fr_stop\n"); 1018 #endif 1019 if (state(hdlc)->settings.lmi != LMI_NONE) 1020 del_timer_sync(&state(hdlc)->timer); 1021 fr_set_link_state(0, dev); 1022 } 1023 1024 1025 static void fr_close(struct net_device *dev) 1026 { 1027 hdlc_device *hdlc = dev_to_hdlc(dev); 1028 struct pvc_device *pvc = state(hdlc)->first_pvc; 1029 1030 while (pvc) { /* Shutdown all PVCs for this FRAD */ 1031 if (pvc->main) 1032 dev_close(pvc->main); 1033 if (pvc->ether) 1034 dev_close(pvc->ether); 1035 pvc = pvc->next; 1036 } 1037 } 1038 1039 1040 static void pvc_setup(struct net_device *dev) 1041 { 1042 dev->type = ARPHRD_DLCI; 1043 dev->flags = IFF_POINTOPOINT; 1044 dev->hard_header_len = 10; 1045 dev->addr_len = 2; 1046 netif_keep_dst(dev); 1047 } 1048 1049 static const struct net_device_ops pvc_ops = { 1050 .ndo_open = pvc_open, 1051 .ndo_stop = pvc_close, 1052 .ndo_start_xmit = pvc_xmit, 1053 .ndo_do_ioctl = pvc_ioctl, 1054 }; 1055 1056 static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) 1057 { 1058 hdlc_device *hdlc = dev_to_hdlc(frad); 1059 struct pvc_device *pvc; 1060 struct net_device *dev; 1061 int used; 1062 1063 if ((pvc = add_pvc(frad, dlci)) == NULL) { 1064 netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n"); 1065 return -ENOBUFS; 1066 } 1067 1068 if (*get_dev_p(pvc, type)) 1069 return -EEXIST; 1070 1071 used = pvc_is_used(pvc); 1072 1073 if (type == ARPHRD_ETHER) 1074 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, 1075 ether_setup); 1076 else 1077 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); 1078 1079 if (!dev) { 1080 netdev_warn(frad, "Memory squeeze on fr_pvc()\n"); 1081 delete_unused_pvcs(hdlc); 1082 return -ENOBUFS; 1083 } 1084 1085 if (type == ARPHRD_ETHER) { 1086 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 1087 eth_hw_addr_random(dev); 1088 } else { 1089 *(__be16*)dev->dev_addr = htons(dlci); 1090 dlci_to_q922(dev->broadcast, dlci); 1091 } 1092 dev->netdev_ops = &pvc_ops; 1093 dev->mtu = HDLC_MAX_MTU; 1094 dev->min_mtu = 68; 1095 dev->max_mtu = HDLC_MAX_MTU; 1096 dev->priv_flags |= IFF_NO_QUEUE; 1097 dev->ml_priv = pvc; 1098 1099 if (register_netdevice(dev) != 0) { 1100 free_netdev(dev); 1101 delete_unused_pvcs(hdlc); 1102 return -EIO; 1103 } 1104 1105 dev->needs_free_netdev = true; 1106 *get_dev_p(pvc, type) = dev; 1107 if (!used) { 1108 state(hdlc)->dce_changed = 1; 1109 state(hdlc)->dce_pvc_count++; 1110 } 1111 return 0; 1112 } 1113 1114 1115 1116 static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) 1117 { 1118 struct pvc_device *pvc; 1119 struct net_device *dev; 1120 1121 if ((pvc = find_pvc(hdlc, dlci)) == NULL) 1122 return -ENOENT; 1123 1124 if ((dev = *get_dev_p(pvc, type)) == NULL) 1125 return -ENOENT; 1126 1127 if (dev->flags & IFF_UP) 1128 return -EBUSY; /* PVC in use */ 1129 1130 unregister_netdevice(dev); /* the destructor will free_netdev(dev) */ 1131 *get_dev_p(pvc, type) = NULL; 1132 1133 if (!pvc_is_used(pvc)) { 1134 state(hdlc)->dce_pvc_count--; 1135 state(hdlc)->dce_changed = 1; 1136 } 1137 delete_unused_pvcs(hdlc); 1138 return 0; 1139 } 1140 1141 1142 1143 static void fr_destroy(struct net_device *frad) 1144 { 1145 hdlc_device *hdlc = dev_to_hdlc(frad); 1146 struct pvc_device *pvc = state(hdlc)->first_pvc; 1147 state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */ 1148 state(hdlc)->dce_pvc_count = 0; 1149 state(hdlc)->dce_changed = 1; 1150 1151 while (pvc) { 1152 struct pvc_device *next = pvc->next; 1153 /* destructors will free_netdev() main and ether */ 1154 if (pvc->main) 1155 unregister_netdevice(pvc->main); 1156 1157 if (pvc->ether) 1158 unregister_netdevice(pvc->ether); 1159 1160 kfree(pvc); 1161 pvc = next; 1162 } 1163 } 1164 1165 1166 static struct hdlc_proto proto = { 1167 .close = fr_close, 1168 .start = fr_start, 1169 .stop = fr_stop, 1170 .detach = fr_destroy, 1171 .ioctl = fr_ioctl, 1172 .netif_rx = fr_rx, 1173 .module = THIS_MODULE, 1174 }; 1175 1176 1177 static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) 1178 { 1179 fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr; 1180 const size_t size = sizeof(fr_proto); 1181 fr_proto new_settings; 1182 hdlc_device *hdlc = dev_to_hdlc(dev); 1183 fr_proto_pvc pvc; 1184 int result; 1185 1186 switch (ifr->ifr_settings.type) { 1187 case IF_GET_PROTO: 1188 if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */ 1189 return -EINVAL; 1190 ifr->ifr_settings.type = IF_PROTO_FR; 1191 if (ifr->ifr_settings.size < size) { 1192 ifr->ifr_settings.size = size; /* data size wanted */ 1193 return -ENOBUFS; 1194 } 1195 if (copy_to_user(fr_s, &state(hdlc)->settings, size)) 1196 return -EFAULT; 1197 return 0; 1198 1199 case IF_PROTO_FR: 1200 if (!capable(CAP_NET_ADMIN)) 1201 return -EPERM; 1202 1203 if (dev->flags & IFF_UP) 1204 return -EBUSY; 1205 1206 if (copy_from_user(&new_settings, fr_s, size)) 1207 return -EFAULT; 1208 1209 if (new_settings.lmi == LMI_DEFAULT) 1210 new_settings.lmi = LMI_ANSI; 1211 1212 if ((new_settings.lmi != LMI_NONE && 1213 new_settings.lmi != LMI_ANSI && 1214 new_settings.lmi != LMI_CCITT && 1215 new_settings.lmi != LMI_CISCO) || 1216 new_settings.t391 < 1 || 1217 new_settings.t392 < 2 || 1218 new_settings.n391 < 1 || 1219 new_settings.n392 < 1 || 1220 new_settings.n393 < new_settings.n392 || 1221 new_settings.n393 > 32 || 1222 (new_settings.dce != 0 && 1223 new_settings.dce != 1)) 1224 return -EINVAL; 1225 1226 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 1227 if (result) 1228 return result; 1229 1230 if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */ 1231 result = attach_hdlc_protocol(dev, &proto, 1232 sizeof(struct frad_state)); 1233 if (result) 1234 return result; 1235 state(hdlc)->first_pvc = NULL; 1236 state(hdlc)->dce_pvc_count = 0; 1237 } 1238 memcpy(&state(hdlc)->settings, &new_settings, size); 1239 dev->type = ARPHRD_FRAD; 1240 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); 1241 return 0; 1242 1243 case IF_PROTO_FR_ADD_PVC: 1244 case IF_PROTO_FR_DEL_PVC: 1245 case IF_PROTO_FR_ADD_ETH_PVC: 1246 case IF_PROTO_FR_DEL_ETH_PVC: 1247 if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */ 1248 return -EINVAL; 1249 1250 if (!capable(CAP_NET_ADMIN)) 1251 return -EPERM; 1252 1253 if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc, 1254 sizeof(fr_proto_pvc))) 1255 return -EFAULT; 1256 1257 if (pvc.dlci <= 0 || pvc.dlci >= 1024) 1258 return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ 1259 1260 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || 1261 ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) 1262 result = ARPHRD_ETHER; /* bridged Ethernet device */ 1263 else 1264 result = ARPHRD_DLCI; 1265 1266 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || 1267 ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) 1268 return fr_add_pvc(dev, pvc.dlci, result); 1269 else 1270 return fr_del_pvc(hdlc, pvc.dlci, result); 1271 } 1272 1273 return -EINVAL; 1274 } 1275 1276 1277 static int __init mod_init(void) 1278 { 1279 register_hdlc_protocol(&proto); 1280 return 0; 1281 } 1282 1283 1284 static void __exit mod_exit(void) 1285 { 1286 unregister_hdlc_protocol(&proto); 1287 } 1288 1289 1290 module_init(mod_init); 1291 module_exit(mod_exit); 1292 1293 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); 1294 MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC"); 1295 MODULE_LICENSE("GPL v2"); 1296