xref: /freebsd/contrib/ntp/ntpd/ntp_refclock.c (revision aa1a8ff2d6dbc51ef058f46f3db5a8bb77967145)
1 /*
2  * ntp_refclock - processing support for reference clocks
3  */
4 #ifdef HAVE_CONFIG_H
5 # include <config.h>
6 #endif
7 
8 #include "ntpd.h"
9 #include "ntp_io.h"
10 #include "ntp_unixtime.h"
11 #include "ntp_tty.h"
12 #include "ntp_refclock.h"
13 #include "ntp_clockdev.h"
14 #include "ntp_stdlib.h"
15 #include "ntp_assert.h"
16 #include "timespecops.h"
17 
18 #include <stdio.h>
19 
20 #ifdef HAVE_SYS_IOCTL_H
21 # include <sys/ioctl.h>
22 #endif /* HAVE_SYS_IOCTL_H */
23 
24 #ifdef REFCLOCK
25 
26 #ifdef KERNEL_PLL
27 #include "ntp_syscall.h"
28 #endif /* KERNEL_PLL */
29 
30 #ifdef HAVE_PPSAPI
31 #include "ppsapi_timepps.h"
32 #include "refclock_atom.h"
33 #endif /* HAVE_PPSAPI */
34 
35 /*
36  * Reference clock support is provided here by maintaining the fiction
37  * that the clock is actually a peer.  As no packets are exchanged with
38  * a reference clock, however, we replace the transmit, receive and
39  * packet procedures with separate code to simulate them.  Routines
40  * refclock_transmit() and refclock_receive() maintain the peer
41  * variables in a state analogous to an actual peer and pass reference
42  * clock data on through the filters.  Routines refclock_peer() and
43  * refclock_unpeer() are called to initialize and terminate reference
44  * clock associations.  A set of utility routines is included to open
45  * serial devices, process sample data, and to perform various debugging
46  * functions.
47  *
48  * The main interface used by these routines is the refclockproc
49  * structure, which contains for most drivers the decimal equivalents
50  * of the year, day, hour, minute, second and millisecond/microsecond
51  * decoded from the ASCII timecode.  Additional information includes
52  * the receive timestamp, exception report, statistics tallies, etc.
53  * In addition, there may be a driver-specific unit structure used for
54  * local control of the device.
55  *
56  * The support routines are passed a pointer to the peer structure,
57  * which is used for all peer-specific processing and contains a
58  * pointer to the refclockproc structure, which in turn contains a
59  * pointer to the unit structure, if used.  The peer structure is
60  * identified by an interface address in the dotted quad form
61  * 127.127.t.u, where t is the clock type and u the unit.
62  */
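/*
 * Example: with a configuration line of "server 127.127.20.0", the
 * address selects driver type 20 (the generic NMEA driver in the
 * standard driver numbering) and unit 0; refclock_newpeer() below
 * extracts them with REFCLOCKTYPE() and REFCLOCKUNIT().
 */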
63 #define FUDGEFAC	.1	/* fudge correction factor */
64 #define LF		0x0a	/* ASCII LF */
65 
66 int	cal_enable;		/* enable refclock calibrate */
67 
68 /*
69  * Forward declarations
70  */
71 static int  refclock_cmpl_fp (const void *, const void *);
72 static int  refclock_sample (struct refclockproc *);
73 static int  refclock_ioctl(int, u_int);
74 static void refclock_checkburst(struct peer *, struct refclockproc *);
75 static int  symBaud2numBaud(int symBaud);
76 static int  numBaud2symBaud(int numBaud);
77 
78 /* circular buffer functions
79  *
80  * circular buffer management comes in two flavours:
81  * for powers of two, and all others.
82  */
83 
84 #if MAXSTAGE & (MAXSTAGE - 1)
85 
86 static void clk_add_sample(
87 	struct refclockproc * const	pp,
88 	double				sv
89 	)
90 {
91 	pp->coderecv = (pp->coderecv + 1) % MAXSTAGE;
92 	if (pp->coderecv == pp->codeproc)
93 		pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
94 	pp->filter[pp->coderecv] = sv;
95 }
96 
97 static double clk_pop_sample(
98 	struct refclockproc * const	pp
99 	)
100 {
101 	if (pp->coderecv == pp->codeproc)
102 		return 0; /* Maybe a NaN would be better? */
103 	pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
104 	return pp->filter[pp->codeproc];
105 }
106 
107 static inline u_int clk_cnt_sample(
108 	struct refclockproc * const	pp
109 	)
110 {
111 	u_int retv = pp->coderecv - pp->codeproc;
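	/* Note: if coderecv has wrapped around behind codeproc, the
	 * unsigned subtraction above underflows; adding MAXSTAGE
	 * (modulo 2^32) below then yields the true element count.
	 */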
112 	if (retv > MAXSTAGE)
113 		retv += MAXSTAGE;
114 	return retv;
115 }
116 
117 #else
118 
119 static inline void clk_add_sample(
120 	struct refclockproc * const	pp,
121 	double				sv
122 	)
123 {
124 	pp->coderecv  = (pp->coderecv + 1) & (MAXSTAGE - 1);
125 	if (pp->coderecv == pp->codeproc)
126 		pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
127 	pp->filter[pp->coderecv] = sv;
128 }
129 
130 static inline double clk_pop_sample(
131 	struct refclockproc * const	pp
132 	)
133 {
134 	if (pp->coderecv == pp->codeproc)
135 		return 0; /* Maybe a NaN would be better? */
136 	pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
137 	return pp->filter[pp->codeproc];
138 }
139 
140 static inline u_int clk_cnt_sample(
141 	struct refclockproc * const	pp
142 	)
143 {
144 	return (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
145 }
146 
147 #endif
148 
149 /*
150  * refclock_report - note the occurrence of an event
151  *
152  * This routine presently just remembers the report and logs it, but
153  * does nothing heroic for the trap handler. It tries to be a good
154  * citizen and bothers the system log only if things change.
155  */
156 void
157 refclock_report(
158 	struct peer *peer,
159 	int code
160 	)
161 {
162 	struct refclockproc *pp;
163 
164 	pp = peer->procptr;
165 	if (pp == NULL)
166 		return;
167 
168 	switch (code) {
169 
170 	case CEVNT_TIMEOUT:
171 		pp->noreply++;
172 		break;
173 
174 	case CEVNT_BADREPLY:
175 		pp->badformat++;
176 		break;
177 
178 	case CEVNT_FAULT:
179 		break;
180 
181 	case CEVNT_BADDATE:
182 	case CEVNT_BADTIME:
183 		pp->baddata++;
184 		break;
185 
186 	default:
187 		/* ignore others */
188 		break;
189 	}
190 	if ((code != CEVNT_NOMINAL) && (pp->lastevent < 15))
191 		pp->lastevent++;
192 	if (pp->currentstatus != code) {
193 		pp->currentstatus = (u_char)code;
194 		report_event(PEVNT_CLOCK, peer, ceventstr(code));
195 	}
196 }
197 
198 
199 /*
200  * init_refclock - initialize the reference clock drivers
201  *
202  * This routine calls each of the drivers in turn to initialize internal
203  * variables, if necessary. Most drivers have nothing to say at this
204  * point.
205  */
206 void
207 init_refclock(void)
208 {
209 	int i;
210 
211 	for (i = 0; i < (int)num_refclock_conf; i++)
212 		if (refclock_conf[i]->clock_init != noentry)
213 			(refclock_conf[i]->clock_init)();
214 }
215 
216 
217 /*
218  * refclock_newpeer - initialize and start a reference clock
219  *
220  * This routine allocates and initializes the interface structure which
221  * supports a reference clock in the form of an ordinary NTP peer. A
222  * driver-specific support routine completes the initialization, if
223  * used. Default peer variables which identify the clock and establish
224  * its reference ID and stratum are set here. It returns one on success
225  * and zero if the clock address is invalid or already running, if
226  * insufficient resources are available, or if the driver declares a
227  * bum rap.
228  */
229 int
230 refclock_newpeer(
231 	struct peer *peer	/* peer structure pointer */
232 	)
233 {
234 	struct refclockproc *pp;
235 	u_char clktype;
236 	int unit;
237 
238 	/*
239 	 * Check for valid clock address. If already running, shut it
240 	 * down first.
241 	 */
242 	if (!ISREFCLOCKADR(&peer->srcadr)) {
243 		msyslog(LOG_ERR,
244 			"refclock_newpeer: clock address %s invalid",
245 			stoa(&peer->srcadr));
246 		return (0);
247 	}
248 	clktype = (u_char)REFCLOCKTYPE(&peer->srcadr);
249 	unit = REFCLOCKUNIT(&peer->srcadr);
250 	if (clktype >= num_refclock_conf ||
251 		refclock_conf[clktype]->clock_start == noentry) {
252 		msyslog(LOG_ERR,
253 			"refclock_newpeer: clock type %d invalid",
254 			clktype);
255 		return (0);
256 	}
257 
258 	/*
259 	 * Allocate and initialize interface structure
260 	 */
261 	pp = emalloc_zero(sizeof(*pp));
262 	peer->procptr = pp;
263 
264 	/*
265 	 * Initialize structures
266 	 */
267 	peer->refclktype = clktype;
268 	peer->refclkunit = (u_char)unit;
269 	peer->flags |= FLAG_REFCLOCK;
270 	peer->leap = LEAP_NOTINSYNC;
271 	peer->stratum = STRATUM_REFCLOCK;
272 	peer->ppoll = peer->maxpoll;
273 	pp->type = clktype;
274 	pp->conf = refclock_conf[clktype];
275 	pp->timestarted = current_time;
276 	pp->io.fd = -1;
277 
278 	/*
279 	 * Set peer.pmode based on the hmode. For appearances only.
280 	 */
281 	switch (peer->hmode) {
282 	case MODE_ACTIVE:
283 		peer->pmode = MODE_PASSIVE;
284 		break;
285 
286 	default:
287 		peer->pmode = MODE_SERVER;
288 		break;
289 	}
290 
291 	/*
292 	 * Do driver dependent initialization. The above defaults
293 	 * can be wiggled, then finish up for consistency.
294 	 */
295 	if (!((refclock_conf[clktype]->clock_start)(unit, peer))) {
296 		refclock_unpeer(peer);
297 		return (0);
298 	}
299 	peer->refid = pp->refid;
300 	return (1);
301 }
302 
303 
304 /*
305  * refclock_unpeer - shut down a clock
306  */
307 void
308 refclock_unpeer(
309 	struct peer *peer	/* peer structure pointer */
310 	)
311 {
312 	u_char clktype;
313 	int unit;
314 
315 	/*
316 	 * Wiggle the driver to release its resources, then give back
317 	 * the interface structure.
318 	 */
319 	if (NULL == peer->procptr)
320 		return;
321 
322 	clktype = peer->refclktype;
323 	unit = peer->refclkunit;
324 	if (refclock_conf[clktype]->clock_shutdown != noentry)
325 		(refclock_conf[clktype]->clock_shutdown)(unit, peer);
326 	free(peer->procptr);
327 	peer->procptr = NULL;
328 }
329 
330 
331 /*
332  * refclock_timer - called once per second for housekeeping.
333  */
334 void
335 refclock_timer(
336 	struct peer *p
337 	)
338 {
339 	struct refclockproc *	pp;
340 	int			unit;
341 
342 	unit = p->refclkunit;
343 	pp = p->procptr;
344 	if (pp->conf->clock_timer != noentry)
345 		(*pp->conf->clock_timer)(unit, p);
346 	if (pp->action != NULL && pp->nextaction <= current_time)
347 		(*pp->action)(p);
348 }
349 
350 
351 /*
352  * refclock_transmit - simulate the transmit procedure
353  *
354  * This routine implements the NTP transmit procedure for a reference
355  * clock. This provides a mechanism to call the driver at the NTP poll
356  * interval, as well as a reachability mechanism to detect a
357  * broken radio or other madness.
358  */
359 void
360 refclock_transmit(
361 	struct peer *peer	/* peer structure pointer */
362 	)
363 {
364 	u_char clktype;
365 	int unit;
366 
367 	clktype = peer->refclktype;
368 	unit = peer->refclkunit;
369 	peer->sent++;
370 	get_systime(&peer->xmt);
371 
372 	/*
373 	 * This is a ripoff of the peer transmit routine, but
374 	 * specialized for reference clocks. We do a little less
375 	 * protocol here and call the driver-specific transmit routine.
376 	 */
377 	if (peer->burst == 0) {
378 		u_char oreach;
379 #ifdef DEBUG
380 		if (debug)
381 			printf("refclock_transmit: at %ld %s\n",
382 			    current_time, stoa(&(peer->srcadr)));
383 #endif
384 
385 		/*
386 		 * Update reachability and poll variables like the
387 		 * network code.
388 		 */
389 		oreach = peer->reach & 0xfe;
390 		peer->reach <<= 1;
391 		if (!(peer->reach & 0x0f))
392 			clock_filter(peer, 0., 0., MAXDISPERSE);
393 		peer->outdate = current_time;
394 		if (!peer->reach) {
395 			if (oreach) {
396 				report_event(PEVNT_UNREACH, peer, NULL);
397 				peer->timereachable = current_time;
398 			}
399 		} else {
400 			if (peer->flags & FLAG_BURST)
401 				peer->burst = NSTAGE;
402 		}
403 	} else {
404 		peer->burst--;
405 	}
406 	peer->procptr->inpoll = TRUE;
407 	if (refclock_conf[clktype]->clock_poll != noentry)
408 		(refclock_conf[clktype]->clock_poll)(unit, peer);
409 	poll_update(peer, peer->hpoll, 0);
410 }
411 
412 
413 /*
414  * Compare two doubles - used with qsort()
415  */
416 static int
417 refclock_cmpl_fp(
418 	const void *p1,
419 	const void *p2
420 	)
421 {
422 	const double *dp1 = (const double *)p1;
423 	const double *dp2 = (const double *)p2;
424 
425 	if (*dp1 < *dp2)
426 		return -1;
427 	if (*dp1 > *dp2)
428 		return 1;
429 	return 0;
430 }
431 
432 /*
433  * Get number of available samples
434  */
435 int
436 refclock_samples_avail(
437 	struct refclockproc const * pp
438 	)
439 {
440 	u_int	na;
441 
442 #   if MAXSTAGE & (MAXSTAGE - 1)
443 
444 	na = pp->coderecv - pp->codeproc;
445 	if (na > MAXSTAGE)
446 		na += MAXSTAGE;
447 
448 #   else
449 
450 	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
451 
452 #   endif
453 	return na;
454 }
455 
456 /*
457  * Expire (remove) samples from the tail (oldest samples removed)
458  *
459  * Returns number of samples deleted
460  */
461 int
462 refclock_samples_expire(
463 	struct refclockproc * pp,
464 	int                   nd
465 	)
466 {
467 	u_int	na;
468 
469 	if (nd <= 0)
470 		return 0;
471 
472 #   if MAXSTAGE & (MAXSTAGE - 1)
473 
474 	na = pp->coderecv - pp->codeproc;
475 	if (na > MAXSTAGE)
476 		na += MAXSTAGE;
477 	if ((u_int)nd > na)
478 		nd = (int)na;
479 	pp->codeproc = (pp->codeproc + nd) % MAXSTAGE;
480 
481 #   else
482 
483 	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
484 	if ((u_int)nd > na)
485 		nd = (int)na;
486 	pp->codeproc = (pp->codeproc + nd) & (MAXSTAGE - 1);
487 
488 #   endif
489 	return nd;
490 }
491 
492 /*
493  * refclock_process_offset - update median filter
494  *
495  * This routine uses the given offset and timestamps to construct a new
496  * entry in the median filter circular buffer. Samples that overflow the
497  * filter are quietly discarded.
498  */
499 void
500 refclock_process_offset(
501 	struct refclockproc *pp,	/* refclock structure pointer */
502 	l_fp lasttim,			/* last timecode timestamp */
503 	l_fp lastrec,			/* last receive timestamp */
504 	double fudge
505 	)
506 {
507 	l_fp lftemp;
508 	double doffset;
509 
510 	pp->lastrec = lastrec;
511 	lftemp = lasttim;
512 	L_SUB(&lftemp, &lastrec);
513 	LFPTOD(&lftemp, doffset);
514 	clk_add_sample(pp, doffset + fudge);
515 	refclock_checkburst(pp->io.srcclock, pp);
516 }
517 
518 
519 /*
520  * refclock_process - process a sample from the clock
521  * refclock_process_f - refclock_process with other than time1 fudge
522  *
523  * This routine converts the timecode in the form days, hours, minutes,
524  * seconds and milliseconds/microseconds to internal timestamp format,
525  * then constructs a new entry in the median filter circular buffer.
526  * Return success (1) if the data are correct and consistent with the
527  * conventional calendar.
528  *
529  * Important for PPS users: Normally, pp->lastrec is set to the system
530  * time when the on-time character is received, the pp->year, ...,
531  * pp->second fields are decoded, and the seconds fraction is stored in
532  * pp->nsec (in nanoseconds). When a PPS offset is available, pp->nsec is
533  * forced to zero and the fraction of pp->lastrec is set to the PPS offset.
534  */
535 int
536 refclock_process_f(
537 	struct refclockproc *pp,	/* refclock structure pointer */
538 	double fudge
539 	)
540 {
541 	l_fp offset, ltemp;
542 
543 	/*
544 	 * Compute the timecode timestamp from the days, hours, minutes,
545 	 * seconds and milliseconds/microseconds of the timecode. Use
546 	 * clocktime() for the aggregate seconds and the msec/usec for
547 	 * the fraction, when present. Note that this code relies on the
548 	 * system time for the year and does not use the years of
549 	 * the timecode.
550 	 */
551 	if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
552 		pp->lastrec.l_ui, &pp->yearstart, &offset.l_ui))
553 		return (0);
554 
555 	offset.l_uf = 0;
556 	DTOLFP(pp->nsec / 1e9, &ltemp);
557 	L_ADD(&offset, &ltemp);
558 	refclock_process_offset(pp, offset, pp->lastrec, fudge);
559 	return (1);
560 }
561 
562 
563 int
564 refclock_process(
565 	struct refclockproc *pp		/* refclock structure pointer */
566 )
567 {
568 	return refclock_process_f(pp, pp->fudgetime1);
569 }
570 
571 
572 /*
573  * refclock_sample - process a pile of samples from the clock
574  *
575  * This routine implements a recursive median filter to suppress spikes
576  * in the data, as well as determine a performance statistic. It
577  * calculates the mean offset and RMS jitter. A time adjustment
578  * fudgetime1 can be added to the final offset to compensate for various
579  * systematic errors. The routine returns the number of samples
580  * processed, which could be zero.
581  */
582 static int
583 refclock_sample(
584 	struct refclockproc *pp		/* refclock structure pointer */
585 	)
586 {
587 	size_t	i, j, k, m, n;
588 	double	off[MAXSTAGE];
589 
590 	/*
591 	 * Copy the raw offsets and sort into ascending order. Don't do
592 	 * anything if the buffer is empty.
593 	 */
594 	n = 0;
595 	while (pp->codeproc != pp->coderecv)
596 		off[n++] = clk_pop_sample(pp);
597 	if (n == 0)
598 		return (0);
599 
600 	if (n > 1)
601 		qsort(off, n, sizeof(off[0]), refclock_cmpl_fp);
602 
603 	/*
604 	 * Reject the furthest from the median of the samples until
605 	 * approximately 60 percent of the samples remain.
606 	 *
607 	 * [Bug 3672] The elimination is now based on the proper
608 	 * definition of the median. The true median is not calculated
609 	 * directly, though.
610 	 */
611 	i = 0; j = n;
612 	m = n - (n * 4) / 10;
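	/* Example: with n = 10 samples, m = 10 - 4 = 6, so the loop
	 * below trims four outliers and keeps the middle ~60 percent.
	 * Each pass compares the spread of the low and high tails of
	 * the remaining window [i, j) against its median element and
	 * rejects the end that lies farther away.
	 */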
613 	while ((k = j - i) > m) {
614 		k = (k - 1) >> 1;
615 		if ((off[j - 1] - off[j - k - 1]) < (off[i + k] - off[i]))
616 			i++;	/* reject low end */
617 		else
618 			j--;	/* reject high end */
619 	}
620 
621 	/*
622 	 * Determine the offset and jitter.
623 	 */
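	/* The offset becomes the mean of the m retained samples; the
	 * jitter is the RMS of their successive differences, floored
	 * at the system precision below.
	 */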
624 	pp->offset = off[i];
625 	pp->jitter = 0;
626 	for (k = i + 1; k < j; k++) {
627 		pp->offset += off[k];
628 		pp->jitter += SQUARE(off[k] - off[k - 1]);
629 	}
630 	pp->offset /= m;
631 	m -= (m > 1);	/* only (m-1) terms contribute to jitter! */
632 	pp->jitter = max(SQRT(pp->jitter / m), LOGTOD(sys_precision));
633 
634 	/*
635 	 * If the source has a jitter that cannot be estimated because it
636 	 * is not statistical jitter, the source will be detected as a
637 	 * falseticker sooner or later.  Enforcing a minimum jitter value
638 	 * avoids an estimate that is too low while still detecting higher jitter.
639 	 *
640 	 * Note that this changes the refclock samples and ends up in the
641 	 * clock dispersion, not the clock jitter, despite being called
642 	 * jitter.  To see the modified values, check the NTP clock variable
643 	 * "filtdisp", not "jitter".
644 	 */
645 	pp->jitter = max(pp->jitter, pp->fudgeminjitter);
646 
647 #ifdef DEBUG
648 	if (debug)
649 		printf(
650 		    "refclock_sample: n %d offset %.6f disp %.6f jitter %.6f\n",
651 		    (int)n, pp->offset, pp->disp, pp->jitter);
652 #endif
653 	return (int)n;
654 }
655 
656 
657 /*
658  * refclock_receive - simulate the receive and packet procedures
659  *
660  * This routine simulates the NTP receive and packet procedures for a
661  * reference clock. This provides a mechanism in which the ordinary NTP
662  * filter, selection and combining algorithms can be used to suppress
663  * misbehaving radios and to mitigate between them when more than one is
664  * available for backup.
665  */
666 void
667 refclock_receive(
668 	struct peer *peer	/* peer structure pointer */
669 	)
670 {
671 	struct refclockproc *pp;
672 
673 #ifdef DEBUG
674 	if (debug)
675 		printf("refclock_receive: at %lu %s\n",
676 		    current_time, stoa(&peer->srcadr));
677 #endif
678 
679 	/*
680 	 * Do a little sanity dance and update the peer structure. Groom
681 	 * the median filter samples and give the data to the clock
682 	 * filter.
683 	 */
684 	pp = peer->procptr;
685 	pp->inpoll = FALSE;
686 	peer->leap = pp->leap;
687 	if (peer->leap == LEAP_NOTINSYNC)
688 		return;
689 
690 	peer->received++;
691 	peer->timereceived = current_time;
692 	if (!peer->reach) {
693 		report_event(PEVNT_REACH, peer, NULL);
694 		peer->timereachable = current_time;
695 	}
696 	peer->reach = (peer->reach << (peer->reach & 1)) | 1;
697 	peer->reftime = pp->lastref;
698 	peer->aorg = pp->lastrec;
699 	peer->rootdisp = pp->disp;
700 	get_systime(&peer->dst);
701 	if (!refclock_sample(pp))
702 		return;
703 
704 	clock_filter(peer, pp->offset, 0., pp->jitter);
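	/* Optional calibration: when cal_enable is set and the system
	 * peer is the PPS driver, nudge this (non-PPS) clock's
	 * fudgetime1 by FUDGEFAC (10%) of its measured offset so the
	 * refclocks slowly converge toward the PPS source.
	 */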
705 	if (cal_enable && fabs(last_offset) < sys_mindisp && sys_peer !=
706 	    NULL) {
707 		if (sys_peer->refclktype == REFCLK_ATOM_PPS &&
708 		    peer->refclktype != REFCLK_ATOM_PPS)
709 			pp->fudgetime1 -= pp->offset * FUDGEFAC;
710 	}
711 }
712 
713 
714 /*
715  * refclock_gtlin - groom next input line and extract timestamp
716  *
717  * This routine processes the timecode received from the clock and
718  * strips the parity bit and control characters. It returns the number
719  * of characters in the line followed by a NULL character ('\0'), which
720  * is not included in the count. In case of an empty line, the previous
721  * line is preserved.
722  */
723 int
724 refclock_gtlin(
725 	struct recvbuf *rbufp,	/* receive buffer pointer */
726 	char	*lineptr,	/* current line pointer */
727 	int	bmax,		/* remaining characters in line */
728 	l_fp	*tsptr		/* pointer to timestamp returned */
729 	)
730 {
731 	const char *sp, *spend;
732 	char	   *dp, *dpend;
733 	int         dlen;
734 
735 	if (bmax <= 0)
736 		return (0);
737 
738 	dp    = lineptr;
739 	dpend = dp + bmax - 1; /* leave room for NUL pad */
740 	sp    = (const char *)rbufp->recv_buffer;
741 	spend = sp + rbufp->recv_length;
742 
743 	while (sp != spend && dp != dpend) {
744 		char c;
745 
746 		c = *sp++ & 0x7f;
747 		if (c >= 0x20 && c < 0x7f)
748 			*dp++ = c;
749 	}
750 	/* Get length of data written to the destination buffer. If
751 	 * zero, do *not* place a NUL byte to preserve the previous
752 	 * buffer content.
753 	 */
754 	dlen = dp - lineptr;
755 	if (dlen)
756 	    *dp  = '\0';
757 	*tsptr = rbufp->recv_time;
758 	DPRINTF(2, ("refclock_gtlin: fd %d time %s timecode %d %s\n",
759 		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), dlen,
760 		    (dlen != 0)
761 			? lineptr
762 			: ""));
763 	return (dlen);
764 }
765 
766 
767 /*
768  * refclock_gtraw - get next line/chunk of data
769  *
770  * This routine returns the raw data received from the clock in either
771  * canonical or raw mode. The terminal interface routines map CR to LF.
772  * In canonical mode this results in two lines, one containing data
773  * followed by LF and another containing only LF. In raw mode the
774  * interface routines can deliver arbitrary chunks of data from one
775  * character to a maximum specified by the calling routine. In either
776  * mode the routine returns the number of characters in the line
777  * followed by a NULL character ('\0'), which is not included in the
778  * count.
779  *
780  * *tsptr receives a copy of the buffer timestamp.
781  */
782 int
783 refclock_gtraw(
784 	struct recvbuf *rbufp,	/* receive buffer pointer */
785 	char	*lineptr,	/* current line pointer */
786 	int	bmax,		/* remaining characters in line */
787 	l_fp	*tsptr		/* pointer to timestamp returned */
788 	)
789 {
790 	if (bmax <= 0)
791 		return (0);
792 	bmax -= 1; /* leave room for trailing NUL */
793 	if (bmax > rbufp->recv_length)
794 		bmax = rbufp->recv_length;
795 	memcpy(lineptr, rbufp->recv_buffer, bmax);
796 	lineptr[bmax] = '\0';
797 
798 	*tsptr = rbufp->recv_time;
799 	DPRINTF(2, ("refclock_gtraw: fd %d time %s timecode %d %s\n",
800 		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), bmax,
801 		    lineptr));
802 	return (bmax);
803 }
804 
805 /*
806  * refclock_fdwrite()
807  *
808  * Write data to a clock device. Does the necessary result checks and
809  * logging, and encapsulates OS dependencies.
810  */
811 #ifdef SYS_WINNT
812 extern int async_write(int fd, const void * buf, unsigned int len);
813 #endif
814 
815 size_t
816 refclock_fdwrite(
817 	const struct peer *	peer,
818 	int			fd,
819 	const void *		buf,
820 	size_t			len,
821 	const char *		what
822 	)
823 {
824 	size_t	nret, nout;
825 	int	nerr;
826 
827 	nout = (INT_MAX > len) ? len : INT_MAX;
828 #   ifdef SYS_WINNT
829 	nret = (size_t)async_write(fd, buf, (unsigned int)nout);
830 #   else
831 	nret = (size_t)write(fd, buf, nout);
832 #   endif
833 	if (NULL != what) {
834 		if (nret == FDWRITE_ERROR) {
835 			nerr = errno;
836 			msyslog(LOG_INFO,
837 				"%s: write %s failed, fd=%d, %m",
838 				refnumtoa(&peer->srcadr), what,
839 				fd);
840 			errno = nerr;
841 		} else if (nret != len) {
842 			nerr = errno;
843 			msyslog(LOG_NOTICE,
844 				"%s: %s shortened, fd=%d, wrote %zu of %zu bytes",
845 				refnumtoa(&peer->srcadr), what,
846 				fd, nret, len);
847 			errno = nerr;
848 		}
849 	}
850 	return nret;
851 }
852 
853 size_t
854 refclock_write(
855 	const struct peer *	peer,
856 	const void *		buf,
857 	size_t			len,
858 	const char *		what
859 	)
860 {
861 	if ( ! (peer && peer->procptr)) {
862 		if (NULL != what)
863 			msyslog(LOG_INFO,
864 				"%s: write %s failed, invalid clock peer",
865 				peer ? refnumtoa(&peer->srcadr) : "refclock", what);
866 		errno = EINVAL;
867 		return FDWRITE_ERROR;
868 	}
869 	return refclock_fdwrite(peer, peer->procptr->io.fd,
870 				buf, len, what);
871 }
872 
873 /*
874  * indicate_refclock_packet()
875  *
876  * Passes a fragment of refclock input read from the device to the
877  * driver direct input routine, which may consume it (batch it for
878  * queuing once a logical unit is assembled).  If it is not so
879  * consumed, queue it for the driver's receive entrypoint.
880  *
881  * The return value is TRUE if the data has been consumed as a fragment
882  * and should not be counted as a received packet.
883  */
884 int
885 indicate_refclock_packet(
886 	struct refclockio *	rio,
887 	struct recvbuf *	rb
888 	)
889 {
890 	/* Does this refclock use direct input routine? */
891 	if (rio->io_input != NULL && (*rio->io_input)(rb) == 0) {
892 		/*
893 		 * data was consumed - nothing to pass up
894 		 * into block input machine
895 		 */
896 		freerecvbuf(rb);
897 
898 		return TRUE;
899 	}
900 	add_full_recv_buffer(rb);
901 
902 	return FALSE;
903 }
904 
905 
906 /*
907  * process_refclock_packet()
908  *
909  * Used for deferred processing of 'io_input' on systems where threading
910  * is used (notably Windows). It acts as a trampoline that makes the
911  * real calls to the refclock functions.
912  */
913 #ifdef HAVE_IO_COMPLETION_PORT
914 void
915 process_refclock_packet(
916 	struct recvbuf * rb
917 	)
918 {
919 	struct refclockio * rio;
920 
921 	/* get the refclockio structure from the receive buffer */
922 	rio  = &rb->recv_peer->procptr->io;
923 
924 	/* call 'clock_recv' if either there is no input function or the
925 	 * raw input function tells us to feed the packet to the
926 	 * receiver.
927 	 */
928 	if (rio->io_input == NULL || (*rio->io_input)(rb) != 0) {
929 		rio->recvcount++;
930 		packets_received++;
931 		handler_pkts++;
932 		(*rio->clock_recv)(rb);
933 	}
934 }
935 #endif	/* HAVE_IO_COMPLETION_PORT */
936 
937 
938 /*
939  * The following code does not apply to SYS_WINNT & SYS_VXWORKS ...
940  */
941 #if !defined(SYS_VXWORKS) && !defined(SYS_WINNT)
942 #if defined(HAVE_TERMIOS) || defined(HAVE_SYSV_TTYS) || defined(HAVE_BSD_TTYS)
943 
944 /*
945  * refclock_open - open serial port for reference clock
946  *
947  * This routine opens a serial port for I/O and sets default options. It
948  * returns the file descriptor if successful, or logs an error and
949  * returns -1.
950  */
951 int
952 refclock_open(
953 	const sockaddr_u *srcadr,
954  	const char	*dev,	/* device name pointer */
955 	u_int		speed,	/* serial port speed (code) */
956 	u_int		lflags	/* line discipline flags */
957 	)
958 {
959 	const char *cdev;
960 	int	fd;
961 	int	omode;
962 #ifdef O_NONBLOCK
963 	char	trash[128];	/* litter bin for old input data */
964 #endif
965 
966 	/*
967 	 * Open serial port and set default options
968 	 */
969 	omode = O_RDWR;
970 #ifdef O_NONBLOCK
971 	omode |= O_NONBLOCK;
972 #endif
973 #ifdef O_NOCTTY
974 	omode |= O_NOCTTY;
975 #endif
976 
977 	if (NULL != (cdev = clockdev_lookup(srcadr, 0)))
978 		dev = cdev;
979 
980 	fd = open(dev, omode, 0777);
981 	/* refclock_open() historically returned 0 on failure; avoid fd 0. */
982 	if (0 == fd) {
983 		fd = dup(0);
984 		SAVE_ERRNO(
985 			close(0);
986 		)
987 	}
988 	if (fd < 0) {
989 		SAVE_ERRNO(
990 			msyslog(LOG_ERR, "refclock_open %s: %m", dev);
991 		)
992 		return -1;
993 	}
994 	if (!refclock_setup(fd, speed, lflags)) {
995 		close(fd);
996 		return -1;
997 	}
998 	if (!refclock_ioctl(fd, lflags)) {
999 		close(fd);
1000 		return -1;
1001 	}
1002 	msyslog(LOG_NOTICE, "%s serial %s open at %d bps",
1003 		refnumtoa(srcadr), dev, symBaud2numBaud(speed));
1004 
1005 #ifdef O_NONBLOCK
1006 	/*
1007 	 * We want to make sure there is no pending trash in the input
1008 	 * buffer. Since we have non-blocking IO available, this is a
1009 	 * good moment to read and dump all available outdated stuff
1010 	 * that might have become toxic for the driver.
1011 	 */
1012 	while (read(fd, trash, sizeof(trash)) > 0 || errno == EINTR)
1013 		/*NOP*/;
1014 #endif
1015 	return fd;
1016 }
1017 
1018 
1019 /*
1020  * refclock_setup - initialize terminal interface structure
1021  */
1022 int
1023 refclock_setup(
1024 	int	fd,		/* file descriptor */
1025 	u_int	speed,		/* serial port speed (code) */
1026 	u_int	lflags		/* line discipline flags */
1027 	)
1028 {
1029 	int	i;
1030 	TTY	ttyb, *ttyp;
1031 
1032 	/*
1033 	 * By default, the serial line port is initialized in canonical
1034 	 * (line-oriented) mode at the specified line speed, 8 bits and no
1035 	 * parity. LF ends the line and CR is mapped to LF. The break,
1036 	 * erase and kill functions are disabled. There is a different
1037 	 * section for each terminal interface, as selected at compile
1038 	 * time. The flag bits can be used to set raw mode and echo.
1039 	 */
1040 	ttyp = &ttyb;
1041 #ifdef HAVE_TERMIOS
1042 
1043 	/*
1044 	 * POSIX serial line parameters (termios interface)
1045 	 */
1046 	if (tcgetattr(fd, ttyp) < 0) {
1047 		SAVE_ERRNO(
1048 			msyslog(LOG_ERR,
1049 				"refclock_setup fd %d tcgetattr: %m",
1050 				fd);
1051 		)
1052 		return FALSE;
1053 	}
1054 
1055 	/*
1056 	 * Set canonical mode and local connection; set specified speed,
1057 	 * 8 bits and no parity; map CR to NL; ignore break.
1058 	 */
1059 	if (speed) {
1060 		u_int	ltemp = 0;
1061 
1062 		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
1063 		ttyp->c_oflag = 0;
1064 		ttyp->c_cflag = CS8 | CLOCAL | CREAD;
1065 		if (lflags & LDISC_7O1) {
1066 			/* HP Z3801A needs 7-bit, odd parity */
1067 			ttyp->c_cflag = CS7 | PARENB | PARODD | CLOCAL | CREAD;
1068 		}
1069 		cfsetispeed(&ttyb, speed);
1070 		cfsetospeed(&ttyb, speed);
1071 		for (i = 0; i < NCCS; ++i)
1072 			ttyp->c_cc[i] = '\0';
1073 
1074 #if defined(TIOCMGET) && !defined(SCO5_CLOCK)
1075 
1076 		/*
1077 		 * If we have modem control, check to see if modem leads
1078 		 * are active; if so, set remote connection. This is
1079 		 * necessary for the kernel pps mods to work.
1080 		 */
1081 		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
1082 			msyslog(LOG_ERR,
1083 			    "refclock_setup fd %d TIOCMGET: %m", fd);
1084 #ifdef DEBUG
1085 		if (debug)
1086 			printf("refclock_setup fd %d modem status: 0x%x\n",
1087 			    fd, ltemp);
1088 #endif
1089 		if (ltemp & TIOCM_DSR && lflags & LDISC_REMOTE)
1090 			ttyp->c_cflag &= ~CLOCAL;
1091 #endif /* TIOCMGET */
1092 	}
1093 
1094 	/*
1095 	 * Set raw and echo modes. These can be changed on the fly.
1096 	 */
1097 	ttyp->c_lflag = ICANON;
1098 	if (lflags & LDISC_RAW) {
1099 		ttyp->c_lflag = 0;
1100 		ttyp->c_iflag = 0;
1101 		ttyp->c_cc[VMIN] = 1;
1102 	}
1103 	if (lflags & LDISC_ECHO)
1104 		ttyp->c_lflag |= ECHO;
1105 	if (tcsetattr(fd, TCSANOW, ttyp) < 0) {
1106 		SAVE_ERRNO(
1107 			msyslog(LOG_ERR,
1108 				"refclock_setup fd %d TCSANOW: %m",
1109 				fd);
1110 		)
1111 		return FALSE;
1112 	}
1113 
1114 	/*
1115 	 * flush input and output buffers to discard any outdated stuff
1116 	 * that might have become toxic for the driver. Failing to do so
1117 	 * is logged, but we keep our fingers crossed otherwise.
1118 	 */
1119 	if (tcflush(fd, TCIOFLUSH) < 0)
1120 		msyslog(LOG_ERR, "refclock_setup fd %d tcflush(): %m",
1121 			fd);
1122 #endif /* HAVE_TERMIOS */
1123 
1124 #ifdef HAVE_SYSV_TTYS
1125 
1126 	/*
1127 	 * System V serial line parameters (termio interface)
1128 	 *
1129 	 */
1130 	if (ioctl(fd, TCGETA, ttyp) < 0) {
1131 		SAVE_ERRNO(
1132 			msyslog(LOG_ERR,
1133 				"refclock_setup fd %d TCGETA: %m",
1134 				fd);
1135 		)
1136 		return FALSE;
1137 	}
1138 
1139 	/*
1140 	 * Set canonical mode and local connection; set specified speed,
1141 	 * 8 bits and no parity; map CR to NL; ignore break.
1142 	 */
1143 	if (speed) {
1144 		u_int	ltemp = 0;
1145 
1146 		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
1147 		ttyp->c_oflag = 0;
1148 		ttyp->c_cflag = speed | CS8 | CLOCAL | CREAD;
1149 		for (i = 0; i < NCCS; ++i)
1150 			ttyp->c_cc[i] = '\0';
1151 
1152 #if defined(TIOCMGET) && !defined(SCO5_CLOCK)
1153 
1154 		/*
1155 		 * If we have modem control, check to see if modem leads
1156 		 * are active; if so, set remote connection. This is
1157 		 * necessary for the kernel pps mods to work.
1158 		 */
1159 		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
1160 			msyslog(LOG_ERR,
1161 			    "refclock_setup fd %d TIOCMGET: %m", fd);
1162 #ifdef DEBUG
1163 		if (debug)
1164 			printf("refclock_setup fd %d modem status: %x\n",
1165 			    fd, ltemp);
1166 #endif
1167 		if (ltemp & TIOCM_DSR)
1168 			ttyp->c_cflag &= ~CLOCAL;
1169 #endif /* TIOCMGET */
1170 	}
1171 
1172 	/*
1173 	 * Set raw and echo modes. These can be changed on the fly.
1174 	 */
1175 	ttyp->c_lflag = ICANON;
1176 	if (lflags & LDISC_RAW) {
1177 		ttyp->c_lflag = 0;
1178 		ttyp->c_iflag = 0;
1179 		ttyp->c_cc[VMIN] = 1;
1180 	}
1181 	if (ioctl(fd, TCSETA, ttyp) < 0) {
1182 		SAVE_ERRNO(
1183 			msyslog(LOG_ERR,
1184 				"refclock_setup fd %d TCSETA: %m", fd);
1185 		)
1186 		return FALSE;
1187 	}
1188 #endif /* HAVE_SYSV_TTYS */
1189 
1190 #ifdef HAVE_BSD_TTYS
1191 
1192 	/*
1193 	 * 4.3bsd serial line parameters (sgttyb interface)
1194 	 */
1195 	if (ioctl(fd, TIOCGETP, (char *)ttyp) < 0) {
1196 		SAVE_ERRNO(
1197 			msyslog(LOG_ERR,
1198 				"refclock_setup fd %d TIOCGETP: %m",
1199 				fd);
1200 		)
1201 		return FALSE;
1202 	}
1203 	if (speed)
1204 		ttyp->sg_ispeed = ttyp->sg_ospeed = speed;
1205 	ttyp->sg_flags = EVENP | ODDP | CRMOD;
1206 	if (ioctl(fd, TIOCSETP, (char *)ttyp) < 0) {
1207 		SAVE_ERRNO(
1208 			msyslog(LOG_ERR, "refclock_setup TIOCSETP: %m");
1209 		)
1210 		return FALSE;
1211 	}
1212 #endif /* HAVE_BSD_TTYS */
1213 	return(1);
1214 }
1215 #endif /* HAVE_TERMIOS || HAVE_SYSV_TTYS || HAVE_BSD_TTYS */
1216 
1217 
1218 /*
1219  * refclock_ioctl - set serial port control functions
1220  *
1221  * This routine attempts to hide the internal, system-specific details
1222  * of serial ports. It can handle POSIX (termios), SYSV (termio) and BSD
1223  * (sgtty) interfaces with varying degrees of success. The routine sets
1224  * up optional features such as tty_clk. The routine returns TRUE if
1225  * successful.
1226  */
1227 int
1228 refclock_ioctl(
1229 	int	fd, 		/* file descriptor */
1230 	u_int	lflags		/* line discipline flags */
1231 	)
1232 {
1233 	/*
1234 	 * simply return TRUE if no UNIX line discipline is supported
1235 	 */
1236 	DPRINTF(1, ("refclock_ioctl: fd %d flags 0x%x\n", fd, lflags));
1237 
1238 	return TRUE;
1239 }
1240 #endif /* !defined(SYS_VXWORKS) && !defined(SYS_WINNT) */
1241 
1242 
1243 /*
1244  * refclock_control - set and/or return clock values
1245  *
1246  * This routine is used mainly for debugging. It returns designated
1247  * values from the interface structure that can be displayed using
1248  * ntpdc and the clockstat command. It can also be used to initialize
1249  * configuration variables, such as fudgetimes, fudgevalues, reference
1250  * ID and stratum.
1251  */
1252 void
1253 refclock_control(
1254 	sockaddr_u *srcadr,
1255 	const struct refclockstat *in,
1256 	struct refclockstat *out
1257 	)
1258 {
1259 	struct peer *peer;
1260 	struct refclockproc *pp;
1261 	u_char clktype;
1262 	int unit;
1263 
1264 	/*
1265 	 * Check for valid address and running peer
1266 	 */
1267 	if (!ISREFCLOCKADR(srcadr))
1268 		return;
1269 
1270 	clktype = (u_char)REFCLOCKTYPE(srcadr);
1271 	unit = REFCLOCKUNIT(srcadr);
1272 
1273 	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);
1274 
1275 	if (NULL == peer)
1276 		return;
1277 
1278 	INSIST(peer->procptr != NULL);
1279 	pp = peer->procptr;
1280 
1281 	/*
1282 	 * Initialize requested data
1283 	 */
1284 	if (in != NULL) {
1285 		if (in->haveflags & CLK_HAVETIME1)
1286 			pp->fudgetime1 = in->fudgetime1;
1287 		if (in->haveflags & CLK_HAVETIME2)
1288 			pp->fudgetime2 = in->fudgetime2;
1289 		if (in->haveflags & CLK_HAVEVAL1)
1290 			peer->stratum = pp->stratum = (u_char)in->fudgeval1;
1291 		if (in->haveflags & CLK_HAVEVAL2)
1292 			peer->refid = pp->refid = in->fudgeval2;
1293 		if (in->haveflags & CLK_HAVEFLAG1) {
1294 			pp->sloppyclockflag &= ~CLK_FLAG1;
1295 			pp->sloppyclockflag |= in->flags & CLK_FLAG1;
1296 		}
1297 		if (in->haveflags & CLK_HAVEFLAG2) {
1298 			pp->sloppyclockflag &= ~CLK_FLAG2;
1299 			pp->sloppyclockflag |= in->flags & CLK_FLAG2;
1300 		}
1301 		if (in->haveflags & CLK_HAVEFLAG3) {
1302 			pp->sloppyclockflag &= ~CLK_FLAG3;
1303 			pp->sloppyclockflag |= in->flags & CLK_FLAG3;
1304 		}
1305 		if (in->haveflags & CLK_HAVEFLAG4) {
1306 			pp->sloppyclockflag &= ~CLK_FLAG4;
1307 			pp->sloppyclockflag |= in->flags & CLK_FLAG4;
1308 		}
1309 		if (in->haveflags & CLK_HAVEMINJIT)
1310 			pp->fudgeminjitter = in->fudgeminjitter;
1311 	}
1312 
1313 	/*
1314 	 * Readback requested data
1315 	 */
1316 	if (out != NULL) {
1317 		out->fudgeval1 = pp->stratum;
1318 		out->fudgeval2 = pp->refid;
1319 		out->haveflags = CLK_HAVEVAL1 | CLK_HAVEVAL2;
1320 		out->fudgetime1 = pp->fudgetime1;
1321 		if (0.0 != out->fudgetime1)
1322 			out->haveflags |= CLK_HAVETIME1;
1323 		out->fudgetime2 = pp->fudgetime2;
1324 		if (0.0 != out->fudgetime2)
1325 			out->haveflags |= CLK_HAVETIME2;
1326 		out->flags = (u_char) pp->sloppyclockflag;
1327 		if (CLK_FLAG1 & out->flags)
1328 			out->haveflags |= CLK_HAVEFLAG1;
1329 		if (CLK_FLAG2 & out->flags)
1330 			out->haveflags |= CLK_HAVEFLAG2;
1331 		if (CLK_FLAG3 & out->flags)
1332 			out->haveflags |= CLK_HAVEFLAG3;
1333 		if (CLK_FLAG4 & out->flags)
1334 			out->haveflags |= CLK_HAVEFLAG4;
1335 		out->fudgeminjitter = pp->fudgeminjitter;
1336 		if (0.0 != out->fudgeminjitter)
1337 			out->haveflags |= CLK_HAVEMINJIT;
1338 
1339 		out->timereset = current_time - pp->timestarted;
1340 		out->polls = pp->polls;
1341 		out->noresponse = pp->noreply;
1342 		out->badformat = pp->badformat;
1343 		out->baddata = pp->baddata;
1344 
1345 		out->lastevent = pp->lastevent;
1346 		out->currentstatus = pp->currentstatus;
1347 		out->type = pp->type;
1348 		out->clockdesc = pp->clockdesc;
1349 		out->lencode = (u_short)pp->lencode;
1350 		out->p_lastcode = pp->a_lastcode;
1351 	}
1352 
1353 	/*
1354 	 * Give the stuff to the clock
1355 	 */
1356 	if (refclock_conf[clktype]->clock_control != noentry)
1357 		(refclock_conf[clktype]->clock_control)(unit, in, out, peer);
1358 }
1359 
1360 
1361 /*
1362  * refclock_buginfo - return debugging info
1363  *
1364  * This routine is used mainly for debugging. It returns designated
1365  * values from the interface structure that can be displayed using
1366  * ntpdc and the clkbug command.
1367  */
1368 void
1369 refclock_buginfo(
1370 	sockaddr_u *srcadr,	/* clock address */
1371 	struct refclockbug *bug /* output structure */
1372 	)
1373 {
1374 	struct peer *peer;
1375 	struct refclockproc *pp;
1376 	int clktype;
1377 	int unit;
1378 	unsigned u;
1379 
1380 	/*
1381 	 * Check for valid address and peer structure
1382 	 */
1383 	if (!ISREFCLOCKADR(srcadr))
1384 		return;
1385 
1386 	clktype = (u_char) REFCLOCKTYPE(srcadr);
1387 	unit = REFCLOCKUNIT(srcadr);
1388 
1389 	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);
1390 
1391 	if (NULL == peer || NULL == peer->procptr)
1392 		return;
1393 
1394 	pp = peer->procptr;
1395 
1396 	/*
1397 	 * Copy structure values
1398 	 */
1399 	bug->nvalues = 8;
1400 	bug->svalues = 0x0000003f;
1401 	bug->values[0] = pp->year;
1402 	bug->values[1] = pp->day;
1403 	bug->values[2] = pp->hour;
1404 	bug->values[3] = pp->minute;
1405 	bug->values[4] = pp->second;
1406 	bug->values[5] = pp->nsec;
1407 	bug->values[6] = pp->yearstart;
1408 	bug->values[7] = pp->coderecv;
1409 	bug->stimes = 0xfffffffc;
1410 	bug->times[0] = pp->lastref;
1411 	bug->times[1] = pp->lastrec;
1412 	for (u = 2; u < bug->ntimes; u++)
1413 		DTOLFP(pp->filter[u - 2], &bug->times[u]);
1414 
1415 	/*
1416 	 * Give the stuff to the clock
1417 	 */
1418 	if (refclock_conf[clktype]->clock_buginfo != noentry)
1419 		(refclock_conf[clktype]->clock_buginfo)(unit, bug, peer);
1420 }
1421 
1422 
1423 #ifdef HAVE_PPSAPI
1424 /*
1425  * refclock_ppsapi - initialize/update ppsapi
1426  *
1427  * This routine is called after the fudge command to open the PPSAPI
1428  * interface for later parameter setting.
1429  */
1430 int
1431 refclock_ppsapi(
1432 	int	fddev,			/* fd device */
1433 	struct refclock_atom *ap	/* atom structure pointer */
1434 	)
1435 {
1436 	if (ap->handle == 0) {
1437 		if (time_pps_create(fddev, &ap->handle) < 0) {
1438 			msyslog(LOG_ERR,
1439 			    "refclock_ppsapi: time_pps_create: %m");
1440 			return (0);
1441 		}
1442 		ZERO(ap->ts); /* [Bug 2689] defined INIT state */
1443 	}
1444 	return (1);
1445 }
1446 
1447 
1448 /*
1449  * refclock_params - set ppsapi parameters
1450  *
1451  * This routine is called to set the PPSAPI parameters after the fudge
1452  * command.
1453  */
1454 int
1455 refclock_params(
1456 	int	mode,			/* mode bits */
1457 	struct refclock_atom *ap	/* atom structure pointer */
1458 	)
1459 {
1460 	ZERO(ap->pps_params);
1461 	ap->pps_params.api_version = PPS_API_VERS_1;
1462 
1463 	/*
1464 	 * Solaris serial ports provide PPS pulse capture only on the
1465 	 * assert edge. FreeBSD serial ports provide capture on the
1466 	 * clear edge, while FreeBSD parallel ports provide capture
1467 	 * on the assert edge. Your mileage may vary.
1468 	 */
1469 	if (mode & CLK_FLAG2)
1470 		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTURECLEAR;
1471 	else
1472 		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTUREASSERT;
1473 	if (time_pps_setparams(ap->handle, &ap->pps_params) < 0) {
1474 		msyslog(LOG_ERR,
1475 		    "refclock_params: time_pps_setparams: %m");
1476 		return (0);
1477 	}
1478 
1479 	/*
1480 	 * If flag3 is lit, select the kernel PPS if we can.
1481 	 *
1482 	 * Note: EOPNOTSUPP is the only 'legal' error code we deal with;
1483 	 * it is part of the 'if we can' strategy.  Any other error
1484 	 * indicates something more sinister and makes this function fail.
1485 	 */
1486 	if (mode & CLK_FLAG3) {
1487 		if (time_pps_kcbind(ap->handle, PPS_KC_HARDPPS,
1488 		    ap->pps_params.mode & ~PPS_TSFMT_TSPEC,
1489 		    PPS_TSFMT_TSPEC) < 0)
1490 		{
1491 			if (errno != EOPNOTSUPP) {
1492 				msyslog(LOG_ERR,
1493 					"refclock_params: time_pps_kcbind: %m");
1494 				return (0);
1495 			}
1496 		} else {
1497 			hardpps_enable = 1;
1498 		}
1499 	}
1500 	return (1);
1501 }
1502 
1503 
1504 /*
1505  * refclock_pps - called once per second
1506  *
1507  * This routine is called once per second. It snatches the PPS
1508  * timestamp from the kernel and saves the sign-extended fraction in
1509  * a circular buffer for processing at the next poll event.
1510  */
1511 int
1512 refclock_pps(
1513 	struct peer *peer,		/* peer structure pointer */
1514 	struct refclock_atom *ap,	/* atom structure pointer */
1515 	int	mode			/* mode bits */
1516 	)
1517 {
1518 	struct refclockproc *pp;
1519 	pps_info_t pps_info;
1520 	struct timespec timeout;
1521 	double	dtemp, dcorr, trash;
1522 
1523 	/*
1524 	 * We require the clock to be synchronized before setting the
1525 	 * parameters. When the parameters have been set, fetch the
1526 	 * most recent PPS timestamp.
1527 	 */
1528 	pp = peer->procptr;
1529 	if (ap->handle == 0)
1530 		return (0);
1531 
1532 	if (ap->pps_params.mode == 0 && sys_leap != LEAP_NOTINSYNC) {
1533 		if (refclock_params(pp->sloppyclockflag, ap) < 1)
1534 			return (0);
1535 	}
1536 	ZERO(timeout);
1537 	ZERO(pps_info);
1538 	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC, &pps_info,
1539 	    &timeout) < 0) {
1540 		refclock_report(peer, CEVNT_FAULT);
1541 		return (0);
1542 	}
1543 	timeout = ap->ts;	/* save old timestamp for check */
1544 	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
1545 		ap->ts = pps_info.assert_timestamp;
1546 	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
1547 		ap->ts = pps_info.clear_timestamp;
1548 	else
1549 		return (0);
1550 
1551 	/* [Bug 2689] Discard the first sample we read -- if the PPS
1552 	 * source is currently down / disconnected, we have read a
1553 	 * potentially *very* stale value here. So if our old TS value
1554 	 * is all-zero, we consider this sample unreliable and drop it.
1555 	 *
1556 	 * Note 1: a better check would compare the PPS time stamp to
1557 	 * the current system time and drop it if it's more than say 3s
1558 	 * away.
1559 	 *
1560 	 * Note 2: If we ever again get an all-zero PPS sample, the next
1561 	 * one will be discarded. This can happen every 136 years and is
1562 	 * unlikely ever to be observed.
1563 	 */
1564 	if (0 == (timeout.tv_sec | timeout.tv_nsec))
1565 		return (0);
1566 
1567 	/* If the PPS source fails to deliver a new sample between
1568 	 * polls, it regurgitates the last sample. We do not want to
1569 	 * process the same sample multiple times.
1570 	 */
1571 	if (0 == memcmp(&timeout, &ap->ts, sizeof(timeout)))
1572 		return (0);
1573 
1574 	/*
1575 	 * Convert to signed fraction offset, apply fudge and properly
1576 	 * fold the correction into the [-0.5s,0.5s] range. Handle
1577 	 * excessive fudge times, too.
1578 	 */
1579 	dtemp = ap->ts.tv_nsec / 1e9;
1580 	dcorr = modf((pp->fudgetime1 - dtemp), &trash);
1581 	if (dcorr > 0.5)
1582 		dcorr -= 1.0;
1583 	else if (dcorr < -0.5)
1584 		dcorr += 1.0;
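	/* Example: a pulse captured 0.998 s into the second with a
	 * zero fudge yields dcorr = -0.998, which the folding above
	 * maps into the [-0.5s,0.5s] range as +0.002.
	 */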
1585 
1586 	/* phase gate check: avoid wobbling by +/-1s when too close to
1587 	 * the switch-over point. We allow +/-400ms max phase deviation.
1588 	 * The trade-off is clear: The smaller the limit, the less
1589 	 * sensitive to sampling noise the clock becomes. OTOH the
1590 	 * system must get into phase gate range by other means for the
1591 	 * PPS clock to lock in.
1592 	 */
1593 	if (fabs(dcorr) > 0.4)
1594 		return (0);
1595 
1596 	/*
1597 	 * record this time stamp and stuff in median filter
1598 	 */
1599 	pp->lastrec.l_ui = (u_int32)ap->ts.tv_sec + JAN_1970;
1600 	pp->lastrec.l_uf = (u_int32)(dtemp * FRAC);
1601 	clk_add_sample(pp, dcorr);
1602 	refclock_checkburst(peer, pp);
1603 
1604 #ifdef DEBUG
1605 	if (debug > 1)
1606 		printf("refclock_pps: %lu %f %f\n", current_time,
1607 		    dcorr, pp->fudgetime1);
1608 #endif
1609 	return (1);
1610 }
1611 #endif /* HAVE_PPSAPI */
1612 
1613 
1614 /*
1615  * -------------------------------------------------------------------
1616  * refclock_ppsaugment(...) -- correlate with PPS edge
1617  *
1618  * This function is used to correlate a receive time stamp with a PPS
1619  * edge time stamp. It applies the necessary fudges and then tries to
1620  * move the receive time stamp to the corresponding edge. This can warp
1621  * into the future, if a transmission delay of more than 500ms is not
1622  * compensated with a corresponding fudge time2 value, because then the
1623  * next PPS edge is nearer than the last. (Similar to what the PPS ATOM
1624  * driver does, but we deal with full time stamps here, not just phase
1625  * shift information.) Likewise, a negative fudge time2 value must be
1626  * used if the reference time stamp correlates with the *following* PPS
1627  * pulse.
1628  *
1629  * Note that the receive time fudge value only needs to move the receive
1630  * stamp near a PPS edge but that close proximity is not required;
1631  * +/-100ms precision should be enough. But since the fudge value will
1632  * probably also be used to compensate the transmission delay when no
1633  * PPS edge can be related to the time stamp, it's best to get it as
1634  * close as possible.
1635  *
1636  * It should also be noted that the typical use case is matching to the
1637  * preceding edge, as most units relate their sentences to the current
1638  * second.
1639  *
1640  * The function returns FALSE if there is no correlation possible, TRUE
1641  * otherwise.  Reasons for failure are:
1642  *
1643  *  - no PPS/ATOM unit given
1644  *  - PPS stamp is stale (that is, the difference between the PPS stamp
1645  *    and the corrected time stamp would exceed two seconds)
1646  *  - The phase difference is too close to 0.5, and the decision whether
1647  *    to move up or down is too sensitive to noise.
1648  *
1649  * On output, the receive time stamp is updated with the 'fixed' receive
1650  * time.
1651  * -------------------------------------------------------------------
1652  */
1653 
1654 int/*BOOL*/
1655 refclock_ppsaugment(
1656 	const struct refclock_atom * ap	    ,	/* for PPS io	  */
1657 	l_fp 			   * rcvtime ,
1658 	double			     rcvfudge,	/* i/o read fudge */
1659 	double			     ppsfudge	/* pps fudge	  */
1660 	)
1661 {
1662 	l_fp		delta[1];
1663 
1664 #ifdef HAVE_PPSAPI
1665 
1666 	pps_info_t	pps_info;
1667 	struct timespec timeout;
1668 	l_fp		stamp[1];
1669 	uint32_t	phase;
1670 
1671 	static const uint32_t s_plim_hi = UINT32_C(1932735284);
1672 	static const uint32_t s_plim_lo = UINT32_C(2362232013);
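	/* The limits above are l_fp fractions: 1932735284 / 2^32 is
	 * roughly 0.45 s and 2362232013 / 2^32 roughly 0.55 s, so the
	 * noise lock gap checked below spans the ~100 ms around the
	 * half-second point.
	 */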
1673 
1674 	/* fixup receive time in case we have to bail out early */
1675 	DTOLFP(rcvfudge, delta);
1676 	L_SUB(rcvtime, delta);
1677 
1678 	if (NULL == ap)
1679 		return FALSE;
1680 
1681 	ZERO(timeout);
1682 	ZERO(pps_info);
1683 
1684 	/* fetch PPS stamp from ATOM block */
1685 	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC,
1686 			   &pps_info, &timeout) < 0)
1687 		return FALSE; /* can't get time stamps */
1688 
1689 	/* get last active PPS edge before receive */
1690 	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
1691 		timeout = pps_info.assert_timestamp;
1692 	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
1693 		timeout = pps_info.clear_timestamp;
1694 	else
1695 		return FALSE; /* WHICH edge, please?!? */
1696 
1697 	/* convert PPS stamp to l_fp and apply fudge */
1698 	*stamp = tspec_stamp_to_lfp(timeout);
1699 	DTOLFP(ppsfudge, delta);
1700 	L_SUB(stamp, delta);
1701 
1702 	/* Get difference between PPS stamp (--> yield) and receive time
1703 	 * (--> base)
1704 	 */
1705 	*delta = *stamp;
1706 	L_SUB(delta, rcvtime);
1707 
1708 	/* check if either the PPS or the STAMP is stale in relation
1709 	 * to each other. Bail if it is so...
1710 	 */
1711 	phase = delta->l_ui;
1712 	if (phase >= 2 && phase < (uint32_t)-2)
1713 		return FALSE; /* PPS is stale, don't use it */
1714 
1715 	/* If the phase is too close to 0.5, the decision whether to
1716 	 * move up or down is becoming noise sensitive. That is, we
1717 	 * might amplify usec noise between samples into seconds with a
1718 	 * simple threshold. This can be solved by a Schmitt Trigger
1719 	 * characteristic, but that would also require additional state
1720 	 * where we could remember previous decisions.  Easier to play
1721 	 * dead duck and wait for the conditions to become clear.
1722 	 */
1723 	phase = delta->l_uf;
1724 	if (phase > s_plim_hi && phase < s_plim_lo)
1725 		return FALSE; /* we're in the noise lock gap */
1726 
1727 	/* sign-extend fraction into seconds */
1728 	delta->l_ui = UINT32_C(0) - ((phase >> 31) & 1);
1729 	/* add it up now */
1730 	L_ADD(rcvtime, delta);
1731 	return TRUE;
1732 
1733 #   else /* have no PPS support at all */
1734 
1735 	/* just fixup receive time and fail */
1736 	UNUSED_ARG(ap);
1737 	UNUSED_ARG(ppsfudge);
1738 
1739 	DTOLFP(rcvfudge, delta);
1740 	L_SUB(rcvtime, delta);
1741 	return FALSE;
1742 
1743 #   endif
1744 }
1745 
1746 /*
1747  * -------------------------------------------------------------------
1748  * check if it makes sense to schedule an 'early' poll to get the clock
1749  * up fast after startup or a longer signal dropout.
1750  */
1751 static void
1752 refclock_checkburst(
1753 	struct peer *         peer,
1754 	struct refclockproc * pp
1755 	)
1756 {
1757 	uint32_t	limit;	/* when we should poll */
1758 	u_int		needs;	/* needed number of samples */
1759 
1760 	/* Paranoia: stop here if peer and clockproc don't match up.
1761 	 * And when a poll is actually pending, we don't have to do
1762 	 * anything, either. Likewise if the reach mask is full, of
1763 	 * course, and if the filter has stabilized.
1764 	 */
1765 	if (pp->inpoll || (peer->procptr != pp) ||
1766 	    ((peer->reach == 0xFF) && (peer->disp <= MAXDISTANCE)))
1767 		return;
1768 
1769 	/* If the next poll is soon enough, bail out, too: */
1770 	limit = current_time + 1;
1771 	if (peer->nextdate <= limit)
1772 		return;
1773 
1774 	/* Derive the number of samples needed from the popcount of the
1775 	 * reach mask.  With fewer samples available, we break away.
1776 	 */
1777 	needs  = peer->reach;
1778 	needs -= (needs >> 1) & 0x55;
1779 	needs  = (needs & 0x33) + ((needs >> 2) & 0x33);
1780 	needs  = (needs + (needs >> 4)) & 0x0F;
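	/* (The three lines above are a SWAR popcount of the 8-bit
	 * reach register: e.g. reach == 0x0F yields 4 set bits; the
	 * result is then clamped to the range [3, 6].)
	 */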
1781 	if (needs > 6)
1782 		needs = 6;
1783 	else if (needs < 3)
1784 		needs = 3;
1785 	if (clk_cnt_sample(pp) < needs)
1786 		return;
1787 
1788 	/* Get serious. Reduce the poll to minimum and schedule early.
1789 	 * (Changing the peer poll is probably in vain, as it will be
1790 	 * re-adjusted, but maybe some time the hint will work...)
1791 	 */
1792 	peer->hpoll = peer->minpoll;
1793 	peer->nextdate = limit;
1794 }
1795 
1796 /*
1797  * -------------------------------------------------------------------
1798  * Save the last timecode string, making sure it's properly truncated
1799  * if necessary and NUL terminated in any case.
1800  */
1801 void
1802 refclock_save_lcode(
1803 	struct refclockproc *	pp,
1804 	char const *		tc,
1805 	size_t			len
1806 	)
1807 {
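	/* A len of (size_t)-1 means "take the NUL-terminated length
	 * of tc"; either way the result is capped so that it fits
	 * into a_lastcode with a terminating NUL.
	 */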
1808 	if (len == (size_t)-1)
1809 		len = strnlen(tc,  sizeof(pp->a_lastcode) - 1);
1810 	else if (len >= sizeof(pp->a_lastcode))
1811 		len = sizeof(pp->a_lastcode) - 1;
1812 
1813 	pp->lencode = (u_short)len;
1814 	memcpy(pp->a_lastcode, tc, len);
1815 	pp->a_lastcode[len] = '\0';
1816 }
1817 
1818 /* format data into a_lastcode */
1819 void
1820 refclock_vformat_lcode(
1821 	struct refclockproc *	pp,
1822 	char const *		fmt,
1823 	va_list			va
1824 	)
1825 {
1826 	long len;
1827 
1828 	len = vsnprintf(pp->a_lastcode, sizeof(pp->a_lastcode), fmt, va);
1829 	if (len <= 0)
1830 		len = 0;
1831 	else if (len >= sizeof(pp->a_lastcode))
1832 		len = sizeof(pp->a_lastcode) - 1;
1833 
1834 	pp->lencode = (u_short)len;
1835 	pp->a_lastcode[len] = '\0';
1836 	/* !note! the NUL byte is needed in case vsnprintf() really fails */
1837 }
1838 
1839 void
1840 refclock_format_lcode(
1841 	struct refclockproc *	pp,
1842 	char const *		fmt,
1843 	...
1844 	)
1845 {
1846 	va_list va;
1847 
1848 	va_start(va, fmt);
1849 	refclock_vformat_lcode(pp, fmt, va);
1850 	va_end(va);
1851 }
1852 
1853 static const int baudTable[][2] = {
1854 	{B0, 0},
1855 	{B50, 50},
1856 	{B75, 75},
1857 	{B110, 110},
1858 	{B134, 134},
1859 	{B150, 150},
1860 	{B200, 200},
1861 	{B300, 300},
1862 	{B600, 600},
1863 	{B1200, 1200},
1864 	{B1800, 1800},
1865 	{B2400, 2400},
1866 	{B4800, 4800},
1867 	{B9600, 9600},
1868 	{B19200, 19200},
1869 	{B38400, 38400},
1870 #   ifdef B57600
1871 	{B57600, 57600 },
1872 #   endif
1873 #   ifdef B115200
1874 	{B115200, 115200},
1875 #   endif
1876 	{-1, -1}
1877 };
1878 
1879 
1880 static int  symBaud2numBaud(int symBaud)
1881 {
1882 	int i;
1883 	for (i = 0; baudTable[i][1] >= 0; ++i)
1884 		if (baudTable[i][0] == symBaud)
1885 			break;
1886 	return baudTable[i][1];
1887 }
1888 static int  numBaud2symBaud(int numBaud)
1889 {
1890 	int i;
1891 	for (i = 0; baudTable[i][1] >= 0; ++i)
1892 		if (baudTable[i][1] == numBaud)
1893 			break;
1894 	return baudTable[i][0];
1895 }
1896 #endif /* REFCLOCK */
1897