/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1996 - 2001 Brian Somers <brian@Awfulhak.org>
 *          based on work by Toshiharu OHNO <tony-o@iij.ad.jp>
 *                           Internet Initiative Japan, Inc (IIJ)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>

#ifdef __FreeBSD__
#include <netinet/in.h>
#endif
#include <sys/un.h>

#include <string.h>
#include <termios.h>

#include "layer.h"
#include "mbuf.h"
#include "log.h"
#include "defs.h"
#include "timer.h"
#include "fsm.h"
#include "acf.h"
#include "proto.h"
#include "lqr.h"
#include "hdlc.h"
#include "lcp.h"
#include "async.h"
#include "throughput.h"
#include "ccp.h"
#include "link.h"
#include "descriptor.h"
#include "physical.h"
#include "mp.h"
#include "chat.h"
#include "auth.h"
#include "chap.h"
#include "command.h"
#include "cbcp.h"
#include "datalink.h"

struct echolqr {
  u_int32_t magic;
  u_int32_t signature;
  u_int32_t sequence;
};

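/*
 * The signature below is the ASCII string ``TONY'' stored least significant
 * byte first (0x54 'T', 0x4f 'O', 0x4e 'N', 0x59 'Y'), presumably a nod to
 * Toshiharu OHNO (tony-o).  It lets us recognise our own LCP ECHO REQUEST
 * payloads when the reply comes back.
 */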
#define	SIGNATURE  0x594e4f54

static void
SendEchoReq(struct lcp *lcp)
{
  struct hdlc *hdlc = &link2physical(lcp->fsm.link)->hdlc;
  struct echolqr echo;

  echo.magic = htonl(lcp->want_magic);
  echo.signature = htonl(SIGNATURE);
  echo.sequence = htonl(hdlc->lqm.echo.seq_sent);
  fsm_Output(&lcp->fsm, CODE_ECHOREQ, hdlc->lqm.echo.seq_sent++,
            (u_char *)&echo, sizeof echo, MB_ECHOOUT);
}

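/*
 * Process an LCP ECHO REPLY to one of our ECHO REQUESTs: sanity-check the
 * magic number and our SIGNATURE, then record the reply's sequence number
 * in lqm.echo.seq_recv (taking care over sequence number wrap).
 */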
struct mbuf *
lqr_RecvEcho(struct fsm *fp, struct mbuf *bp)
{
  struct hdlc *hdlc = &link2physical(fp->link)->hdlc;
  struct lcp *lcp = fsm2lcp(fp);
  struct echolqr lqr;

  if (m_length(bp) >= sizeof lqr) {
    m_freem(mbuf_Read(bp, &lqr, sizeof lqr));
    bp = NULL;
    lqr.magic = ntohl(lqr.magic);
    lqr.signature = ntohl(lqr.signature);
    lqr.sequence = ntohl(lqr.sequence);

    /* Tolerate echo replies with either magic number */
    if (lqr.magic != 0 && lqr.magic != lcp->his_magic &&
        lqr.magic != lcp->want_magic) {
      log_Printf(LogWARN, "%s: lqr_RecvEcho: Bad magic: expected 0x%08x,"
                 " got 0x%08x\n", fp->link->name, lcp->his_magic, lqr.magic);
      /*
       * XXX: We should send a terminate request. But poor implementations may
       *      die as a result.
       */
    }
    if (lqr.signature == SIGNATURE
        || lqr.signature == lcp->want_magic) {	/* some implementations return the wrong magic */
      /* careful not to update lqm.echo.seq_recv with older values */
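      /*
       * Worked example of the wrap window: with seq_recv at 0xfffffffe
       * (i.e. above (u_int32_t)0 - 5), a reply sequence of 2 means the
       * 32-bit counter has wrapped, so 2 is accepted as newer; away from
       * the wrap point the reply's sequence simply has to exceed seq_recv.
       */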
      if ((hdlc->lqm.echo.seq_recv > (u_int32_t)0 - 5 && lqr.sequence < 5) ||
          (hdlc->lqm.echo.seq_recv <= (u_int32_t)0 - 5 &&
           lqr.sequence > hdlc->lqm.echo.seq_recv))
        hdlc->lqm.echo.seq_recv = lqr.sequence;
    } else
      log_Printf(LogWARN, "lqr_RecvEcho: Got sig 0x%08lx, not 0x%08lx !\n",
                (u_long)lqr.signature, (u_long)SIGNATURE);
  } else
    log_Printf(LogWARN, "lqr_RecvEcho: Got packet size %zu, expecting %ld !\n",
              m_length(bp), (long)sizeof(struct echolqr));
  return bp;
}

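/*
 * Byte-swap every u_int32_t field of a struct lqrdata.  Since ntohl() and
 * htonl() are inverses of one another, the same routine converts in both
 * directions: lqr_Input() uses it to get the peer's LQR into host order,
 * and lqr_LayerPush() uses it to put ours onto the wire.
 */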
void
lqr_ChangeOrder(struct lqrdata *src, struct lqrdata *dst)
{
  u_int32_t *sp, *dp;
  unsigned n;

  sp = (u_int32_t *) src;
  dp = (u_int32_t *) dst;
  for (n = 0; n < sizeof(struct lqrdata) / sizeof(u_int32_t); n++, sp++, dp++)
    *dp = ntohl(*sp);
}

static void
SendLqrData(struct lcp *lcp)
{
  struct mbuf *bp;
  int extra;

  extra = proto_WrapperOctets(lcp, PROTO_LQR) +
          acf_WrapperOctets(lcp, PROTO_LQR);
  bp = m_get(sizeof(struct lqrdata) + extra, MB_LQROUT);
  bp->m_len -= extra;
  bp->m_offset += extra;

  /*
   * Send on the highest priority queue.  We send garbage - the real data
   * is written by lqr_LayerPush() where we know how to fill in all the
   * fields.  Note, lqr_LayerPush() ``knows'' that we're pushing onto the
   * highest priority queue, and factors out packet & octet values from
   * other queues!
   */
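  /*
   * (That factoring out is done in lqr_LayerPush() below via
   * link_PendingLowPriorityData().)
   */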
  link_PushPacket(lcp->fsm.link, bp, lcp->fsm.bundle,
                  LINK_QUEUES(lcp->fsm.link) - 1, PROTO_LQR);
}

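/*
 * LQM timer callback: depending on the negotiated method, send another LQR
 * or LCP ECHO REQUEST.  If more than 5 LQRs have been sent without hearing
 * back, or our ECHO sequence numbers show more than 5 outstanding requests,
 * give up and take the datalink down.
 */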
static void
SendLqrReport(void *v)
{
  struct lcp *lcp = (struct lcp *)v;
  struct physical *p = link2physical(lcp->fsm.link);

  timer_Stop(&p->hdlc.lqm.timer);

  if (p->hdlc.lqm.method & LQM_LQR) {
    if (p->hdlc.lqm.lqr.resent > 5) {
      /* XXX: Should implement LQM strategy */
      log_Printf(LogPHASE, "%s: ** Too many LQR packets lost **\n",
                lcp->fsm.link->name);
      log_Printf(LogLQM, "%s: Too many LQR packets lost\n",
                lcp->fsm.link->name);
      p->hdlc.lqm.method = 0;
      datalink_Down(p->dl, CLOSE_NORMAL);
    } else {
      SendLqrData(lcp);
      p->hdlc.lqm.lqr.resent++;
    }
  } else if (p->hdlc.lqm.method & LQM_ECHO) {
    if ((p->hdlc.lqm.echo.seq_sent > 5 &&
         p->hdlc.lqm.echo.seq_sent - 5 > p->hdlc.lqm.echo.seq_recv) ||
        (p->hdlc.lqm.echo.seq_sent <= 5 &&
         p->hdlc.lqm.echo.seq_sent > p->hdlc.lqm.echo.seq_recv + 5)) {
      log_Printf(LogPHASE, "%s: ** Too many LCP ECHO packets lost **\n",
                lcp->fsm.link->name);
      log_Printf(LogLQM, "%s: Too many LCP ECHO packets lost\n",
                lcp->fsm.link->name);
      p->hdlc.lqm.method = 0;
      datalink_Down(p->dl, CLOSE_NORMAL);
    } else
      SendEchoReq(lcp);
  }
  if (p->hdlc.lqm.method && p->hdlc.lqm.timer.load)
    timer_Start(&p->hdlc.lqm.timer);
}

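/*
 * Process an incoming LQR packet.  If LQR hasn't been negotiated, protocol
 * reject it.  Otherwise, record the peer's counters, snapshot our own input
 * statistics, analyse the loss figures and reply with an LQR of our own when
 * we're not running an LQR timer or when two successive LQRs from the peer
 * report the same PeerInLQRs count.
 */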
struct mbuf *
lqr_Input(struct bundle *bundle __unused, struct link *l, struct mbuf *bp)
{
  struct physical *p = link2physical(l);
  struct lcp *lcp;
  int len;

  if (p == NULL) {
    log_Printf(LogERROR, "lqr_Input: Not a physical link - dropped\n");
    m_freem(bp);
    return NULL;
  }

  /* Only dereference p now that we know it isn't NULL */
  lcp = p->hdlc.lqm.owner;
  len = m_length(bp);
  if (len != sizeof(struct lqrdata))
    log_Printf(LogWARN, "lqr_Input: Got packet size %d, expecting %ld !\n",
              len, (long)sizeof(struct lqrdata));
  else if (!IsAccepted(l->lcp.cfg.lqr) && !(p->hdlc.lqm.method & LQM_LQR)) {
    bp = m_pullup(proto_Prepend(bp, PROTO_LQR, 0, 0));
    lcp_SendProtoRej(lcp, MBUF_CTOP(bp), bp->m_len);
  } else {
    struct lqrdata *lqr;

    bp = m_pullup(bp);
    lqr = (struct lqrdata *)MBUF_CTOP(bp);
    if (ntohl(lqr->MagicNumber) != lcp->his_magic)
      log_Printf(LogWARN, "lqr_Input: magic 0x%08lx is wrong,"
                 " expecting 0x%08lx\n",
                 (u_long)ntohl(lqr->MagicNumber), (u_long)lcp->his_magic);
    else {
      struct lqrdata lastlqr;

      memcpy(&lastlqr, &p->hdlc.lqm.lqr.peer, sizeof lastlqr);
      lqr_ChangeOrder(lqr, &p->hdlc.lqm.lqr.peer);
      lqr_Dump(l->name, "Input", &p->hdlc.lqm.lqr.peer);
      /* we have received an LQR from our peer */
      p->hdlc.lqm.lqr.resent = 0;

      /* Snapshot our state when the LQR packet was received */
      memcpy(&p->hdlc.lqm.lqr.prevSave, &p->hdlc.lqm.lqr.Save,
             sizeof p->hdlc.lqm.lqr.prevSave);
      p->hdlc.lqm.lqr.Save.InLQRs = ++p->hdlc.lqm.lqr.InLQRs;
      p->hdlc.lqm.lqr.Save.InPackets = p->hdlc.lqm.ifInUniPackets;
      p->hdlc.lqm.lqr.Save.InDiscards = p->hdlc.lqm.ifInDiscards;
      p->hdlc.lqm.lqr.Save.InErrors = p->hdlc.lqm.ifInErrors;
      p->hdlc.lqm.lqr.Save.InOctets = p->hdlc.lqm.lqr.InGoodOctets;

      lqr_Analyse(&p->hdlc, &lastlqr, &p->hdlc.lqm.lqr.peer);

      /*
       * Generate an LQR response if we're not running an LQR timer OR
       * two successive LQR's PeerInLQRs are the same.
       */
      if (p->hdlc.lqm.timer.load == 0 || !(p->hdlc.lqm.method & LQM_LQR) ||
          (lastlqr.PeerInLQRs &&
           lastlqr.PeerInLQRs == p->hdlc.lqm.lqr.peer.PeerInLQRs))
        SendLqrData(lcp);
    }
  }
  m_freem(bp);
  return NULL;
}

/*
 *  When LCP reaches the Opened state, we start LQM activity.
 */
static void
lqr_Setup(struct lcp *lcp)
{
  struct physical *physical = link2physical(lcp->fsm.link);
  int period;

  physical->hdlc.lqm.lqr.resent = 0;
  physical->hdlc.lqm.echo.seq_sent = 0;
  physical->hdlc.lqm.echo.seq_recv = 0;
  memset(&physical->hdlc.lqm.lqr.peer, '\0',
         sizeof physical->hdlc.lqm.lqr.peer);

  physical->hdlc.lqm.method = lcp->cfg.echo ? LQM_ECHO : 0;
  if (IsEnabled(lcp->cfg.lqr) && !REJECTED(lcp, TY_QUALPROTO))
    physical->hdlc.lqm.method |= LQM_LQR;
  timer_Stop(&physical->hdlc.lqm.timer);

  physical->hdlc.lqm.lqr.peer_timeout = lcp->his_lqrperiod;
  if (lcp->his_lqrperiod)
    log_Printf(LogLQM, "%s: Expecting LQR every %d.%02d secs\n",
              physical->link.name, lcp->his_lqrperiod / 100,
              lcp->his_lqrperiod % 100);

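  /*
   * The various lqrperiod values are in hundredths of a second: the
   * negotiated want_lqrperiod and his_lqrperiod are already in those units,
   * while cfg.lqrperiod is (presumably) configured in whole seconds and
   * scaled by 100 here.  timer.load is measured in ticks, SECTICKS of
   * which make up one second.
   */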
  period = lcp->want_lqrperiod ?
    lcp->want_lqrperiod : lcp->cfg.lqrperiod * 100;
  physical->hdlc.lqm.timer.func = SendLqrReport;
  physical->hdlc.lqm.timer.name = "lqm";
  physical->hdlc.lqm.timer.arg = lcp;

  if (lcp->want_lqrperiod || physical->hdlc.lqm.method & LQM_ECHO) {
    log_Printf(LogLQM, "%s: Will send %s every %d.%02d secs\n",
              physical->link.name, lcp->want_lqrperiod ? "LQR" : "LCP ECHO",
              period / 100, period % 100);
    physical->hdlc.lqm.timer.load = period * SECTICKS / 100;
  } else {
    physical->hdlc.lqm.timer.load = 0;
    if (!lcp->his_lqrperiod)
      log_Printf(LogLQM, "%s: LQR/LCP ECHO not negotiated\n",
                 physical->link.name);
  }
}

void
lqr_Start(struct lcp *lcp)
{
  struct physical *p = link2physical(lcp->fsm.link);

  lqr_Setup(lcp);
  if (p->hdlc.lqm.timer.load)
    SendLqrReport(lcp);
}

void
lqr_reStart(struct lcp *lcp)
{
  struct physical *p = link2physical(lcp->fsm.link);

  lqr_Setup(lcp);
  if (p->hdlc.lqm.timer.load)
    timer_Start(&p->hdlc.lqm.timer);
}

void
lqr_StopTimer(struct physical *physical)
{
  timer_Stop(&physical->hdlc.lqm.timer);
}

void
lqr_Stop(struct physical *physical, int method)
{
  if (method == LQM_LQR)
    log_Printf(LogLQM, "%s: Stop sending LQR, Use LCP ECHO instead.\n",
               physical->link.name);
  if (method == LQM_ECHO)
    log_Printf(LogLQM, "%s: Stop sending LCP ECHO.\n",
               physical->link.name);
  physical->hdlc.lqm.method &= ~method;
  if (physical->hdlc.lqm.method)
    SendLqrReport(physical->hdlc.lqm.owner);
  else
    timer_Stop(&physical->hdlc.lqm.timer);
}

void
lqr_Dump(const char *link, const char *message, const struct lqrdata *lqr)
{
  if (log_IsKept(LogLQM)) {
    log_Printf(LogLQM, "%s: %s:\n", link, message);
    log_Printf(LogLQM, "  Magic:          %08x   LastOutLQRs:    %08x\n",
              lqr->MagicNumber, lqr->LastOutLQRs);
    log_Printf(LogLQM, "  LastOutPackets: %08x   LastOutOctets:  %08x\n",
              lqr->LastOutPackets, lqr->LastOutOctets);
    log_Printf(LogLQM, "  PeerInLQRs:     %08x   PeerInPackets:  %08x\n",
              lqr->PeerInLQRs, lqr->PeerInPackets);
    log_Printf(LogLQM, "  PeerInDiscards: %08x   PeerInErrors:   %08x\n",
              lqr->PeerInDiscards, lqr->PeerInErrors);
    log_Printf(LogLQM, "  PeerInOctets:   %08x   PeerOutLQRs:    %08x\n",
              lqr->PeerInOctets, lqr->PeerOutLQRs);
    log_Printf(LogLQM, "  PeerOutPackets: %08x   PeerOutOctets:  %08x\n",
              lqr->PeerOutPackets, lqr->PeerOutOctets);
  }
}

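/*
 * Compare two successive LQRs from the peer.  Over the interval between
 * them, the difference between what one side sent and what the other side
 * reports having received is logged as ``lossage''; LQRs of ours that the
 * peer's latest report doesn't yet cover are shown as en route.  The peer's
 * discard and error counts are used to guess at the cause of any loss.
 */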
void
lqr_Analyse(const struct hdlc *hdlc, const struct lqrdata *oldlqr,
            const struct lqrdata *newlqr)
{
  u_int32_t LQRs, transitLQRs, pkts, octets, disc, err;

  if (!newlqr->PeerInLQRs)	/* No analysis possible yet! */
    return;

  log_Printf(LogLQM, "Analysis:\n");

  LQRs = (newlqr->LastOutLQRs - oldlqr->LastOutLQRs) -
         (newlqr->PeerInLQRs - oldlqr->PeerInLQRs);
  transitLQRs = hdlc->lqm.lqr.OutLQRs - newlqr->LastOutLQRs;
  pkts = (newlqr->LastOutPackets - oldlqr->LastOutPackets) -
         (newlqr->PeerInPackets - oldlqr->PeerInPackets);
  octets = (newlqr->LastOutOctets - oldlqr->LastOutOctets) -
           (newlqr->PeerInOctets - oldlqr->PeerInOctets);
  log_Printf(LogLQM, "  Outbound lossage: %d LQR%s (%d en route), %d packet%s,"
             " %d octet%s\n", (int)LQRs, LQRs == 1 ? "" : "s", (int)transitLQRs,
             (int)pkts, pkts == 1 ? "" : "s",
             (int)octets, octets == 1 ? "" : "s");

  pkts = (newlqr->PeerOutPackets - oldlqr->PeerOutPackets) -
    (hdlc->lqm.lqr.Save.InPackets - hdlc->lqm.lqr.prevSave.InPackets);
  octets = (newlqr->PeerOutOctets - oldlqr->PeerOutOctets) -
    (hdlc->lqm.lqr.Save.InOctets - hdlc->lqm.lqr.prevSave.InOctets);
  log_Printf(LogLQM, "  Inbound lossage: %d packet%s, %d octet%s\n",
             (int)pkts, pkts == 1 ? "" : "s",
             (int)octets, octets == 1 ? "" : "s");

  disc = newlqr->PeerInDiscards - oldlqr->PeerInDiscards;
  err = newlqr->PeerInErrors - oldlqr->PeerInErrors;
  if (disc && err)
    log_Printf(LogLQM, "                   Likely due to both peer congestion"
               " and physical errors\n");
  else if (disc)
    log_Printf(LogLQM, "                   Likely due to peer congestion\n");
  else if (err)
    log_Printf(LogLQM, "                   Likely due to physical errors\n");
  else if (pkts)
    log_Printf(LogLQM, "                   Likely due to transport "
               "congestion\n");
}

static struct mbuf *
lqr_LayerPush(struct bundle *b __unused, struct link *l, struct mbuf *bp,
              int pri __unused, u_short *proto)
{
  struct physical *p = link2physical(l);
  int len, layer;

  if (!p) {
    /* Oops - can't happen :-] */
    m_freem(bp);
    return NULL;
  }

  bp = m_pullup(bp);
  len = m_length(bp);

  /*-
   * From rfc1989:
   *
   *  All octets which are included in the FCS calculation MUST be counted,
   *  including the packet header, the information field, and any padding.
   *  The FCS octets MUST also be counted, and one flag octet per frame
   *  MUST be counted.  All other octets (such as additional flag
   *  sequences, and escape bits or octets) MUST NOT be counted.
   *
   * As we're stacked higher than the HDLC layer (otherwise HDLC wouldn't be
   * able to calculate the FCS), we must not forget about these additional
   * bytes when we're asynchronous.
   *
   * We're also expecting to be stacked *before* the likes of the proto and
   * acf layers (to avoid alignment issues), so deal with this too.
   */

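  /*
   * Concretely, the accounting below counts each outbound frame as len + 1
   * (the flag octet) plus whatever the proto, acf and hdlc layers report
   * via their *_WrapperOctets() routines, while the async layer's escapes
   * and extra flags are deliberately not counted, as the RFC requires.
   */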
  p->hdlc.lqm.ifOutUniPackets++;
  p->hdlc.lqm.ifOutOctets += len + 1;		/* plus 1 flag octet! */
  for (layer = 0; layer < l->nlayers; layer++)
    switch (l->layer[layer]->type) {
      case LAYER_ACF:
        p->hdlc.lqm.ifOutOctets += acf_WrapperOctets(&l->lcp, *proto);
        break;
      case LAYER_ASYNC:
        /* Not included - see rfc1989 */
        break;
      case LAYER_HDLC:
        p->hdlc.lqm.ifOutOctets += hdlc_WrapperOctets();
        break;
      case LAYER_LQR:
        layer = l->nlayers;
        break;
      case LAYER_PROTO:
        p->hdlc.lqm.ifOutOctets += proto_WrapperOctets(&l->lcp, *proto);
        break;
      case LAYER_SYNC:
        /* Nothing to add on */
        break;
      default:
        log_Printf(LogWARN, "Oops, don't know how to do octets for %s layer\n",
                   l->layer[layer]->name);
        break;
    }

  if (*proto == PROTO_LQR) {
    /* Overwrite the entire packet (created in SendLqrData()) */
    struct lqrdata lqr;
    size_t pending_pkts, pending_octets;

    p->hdlc.lqm.lqr.OutLQRs++;

    /*
     * We need to compensate for the fact that we're pushing our data
     * onto the highest priority queue by factoring out packet & octet
     * values from other queues!
     */
    link_PendingLowPriorityData(l, &pending_pkts, &pending_octets);

    memset(&lqr, '\0', sizeof lqr);
    lqr.MagicNumber = p->link.lcp.want_magic;
    lqr.LastOutLQRs = p->hdlc.lqm.lqr.peer.PeerOutLQRs;
    lqr.LastOutPackets = p->hdlc.lqm.lqr.peer.PeerOutPackets;
    lqr.LastOutOctets = p->hdlc.lqm.lqr.peer.PeerOutOctets;
    lqr.PeerInLQRs = p->hdlc.lqm.lqr.Save.InLQRs;
    lqr.PeerInPackets = p->hdlc.lqm.lqr.Save.InPackets;
    lqr.PeerInDiscards = p->hdlc.lqm.lqr.Save.InDiscards;
    lqr.PeerInErrors = p->hdlc.lqm.lqr.Save.InErrors;
    lqr.PeerInOctets = p->hdlc.lqm.lqr.Save.InOctets;
    lqr.PeerOutLQRs = p->hdlc.lqm.lqr.OutLQRs;
    lqr.PeerOutPackets = p->hdlc.lqm.ifOutUniPackets - pending_pkts;
    /*
     * Don't forget our ``flag'' octets....  ifOutOctets includes one flag
     * octet per counted packet, so as well as the pending octets we also
     * subtract one octet for each pending packet.
     */
    lqr.PeerOutOctets = p->hdlc.lqm.ifOutOctets - pending_octets - pending_pkts;
    lqr_Dump(l->name, "Output", &lqr);
    lqr_ChangeOrder(&lqr, (struct lqrdata *)MBUF_CTOP(bp));
  }

  return bp;
}

static struct mbuf *
lqr_LayerPull(struct bundle *b __unused, struct link *l __unused,
	      struct mbuf *bp, u_short *proto)
{
  /*
   * This is the ``Rx'' process from rfc1989, although a part of it is
   * actually performed by sync_LayerPull() & hdlc_LayerPull() so that
   * our octet counts are correct.
   */

  if (*proto == PROTO_LQR)
    m_settype(bp, MB_LQRIN);
  return bp;
}

/*
 * Statistics for pulled packets are recorded either in hdlc_PullPacket()
 * or sync_PullPacket()
 */

struct layer lqrlayer = { LAYER_LQR, "lqr", lqr_LayerPush, lqr_LayerPull };