xref: /freebsd/contrib/ntp/ntpd/ntp_refclock.c (revision a4128aad8503277614f2d214011ef60a19447b83)
1 /*
2  * ntp_refclock - processing support for reference clocks
3  */
4 #ifdef HAVE_CONFIG_H
5 # include <config.h>
6 #endif
7 
8 #include "ntpd.h"
9 #include "ntp_io.h"
10 #include "ntp_unixtime.h"
11 #include "ntp_tty.h"
12 #include "ntp_refclock.h"
13 #include "ntp_clockdev.h"
14 #include "ntp_stdlib.h"
15 #include "ntp_assert.h"
16 #include "timespecops.h"
17 
18 #include <stdio.h>
19 
20 #ifdef HAVE_SYS_IOCTL_H
21 # include <sys/ioctl.h>
22 #endif /* HAVE_SYS_IOCTL_H */
23 
24 #ifdef REFCLOCK
25 
26 #ifdef KERNEL_PLL
27 #include "ntp_syscall.h"
28 #endif /* KERNEL_PLL */
29 
30 #ifdef HAVE_PPSAPI
31 #include "ppsapi_timepps.h"
32 #include "refclock_atom.h"
33 #endif /* HAVE_PPSAPI */
34 
35 /*
36  * Reference clock support is provided here by maintaining the fiction
37  * that the clock is actually a peer.  As no packets are exchanged with
38  * a reference clock, however, we replace the transmit, receive and
39  * packet procedures with separate code to simulate them.  Routines
40  * refclock_transmit() and refclock_receive() maintain the peer
41  * variables in a state analogous to an actual peer and pass reference
42  * clock data on through the filters.  Routines refclock_peer() and
43  * refclock_unpeer() are called to initialize and terminate reference
44  * clock associations.  A set of utility routines is included to open
45  * serial devices, process sample data, and to perform various debugging
46  * functions.
47  *
48  * The main interface used by these routines is the refclockproc
49  * structure, which contains for most drivers the decimal equivalents
50  * of the year, day, month, hour, second and millisecond/microsecond
51  * decoded from the ASCII timecode.  Additional information includes
52  * the receive timestamp, exception report, statistics tallies, etc.
53  * In addition, there may be a driver-specific unit structure used for
54  * local control of the device.
55  *
56  * The support routines are passed a pointer to the peer structure,
57  * which is used for all peer-specific processing and contains a
58  * pointer to the refclockproc structure, which in turn contains a
59  * pointer to the unit structure, if used.  The peer structure is
60  * identified by an interface address in the dotted quad form
61  * 127.127.t.u, where t is the clock type and u the unit.
62  */
63 #define FUDGEFAC	.1	/* fudge correction factor */
64 #define LF		0x0a	/* ASCII LF */
65 
66 int	cal_enable;		/* enable refclock calibrate */
67 
68 /*
69  * Forward declarations
70  */
71 static int  refclock_cmpl_fp (const void *, const void *);
72 static int  refclock_sample (struct refclockproc *);
73 static int  refclock_ioctl(int, u_int);
74 static void refclock_checkburst(struct peer *, struct refclockproc *);
75 
76 /* circular buffer functions
77  *
78  * circular buffer management comes in two flavours:
79  * for powers of two, and all others.
80  */
81 
82 #if MAXSTAGE & (MAXSTAGE - 1)
83 
84 static void clk_add_sample(
85 	struct refclockproc * const	pp,
86 	double				sv
87 	)
88 {
89 	pp->coderecv = (pp->coderecv + 1) % MAXSTAGE;
90 	if (pp->coderecv == pp->codeproc)
91 		pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
92 	pp->filter[pp->coderecv] = sv;
93 }
94 
95 static double clk_pop_sample(
96 	struct refclockproc * const	pp
97 	)
98 {
99 	if (pp->coderecv == pp->codeproc)
100 		return 0; /* Maybe a NaN would be better? */
101 	pp->codeproc = (pp->codeproc + 1) % MAXSTAGE;
102 	return pp->filter[pp->codeproc];
103 }
104 
/* Number of samples currently queued in the ring (modulus variant).
 * Both indices stay in [0, MAXSTAGE).  When coderecv sits "behind"
 * codeproc the unsigned subtraction underflows to a huge value; the
 * range test detects that so one modulus worth of MAXSTAGE can be
 * added back to recover the true count.
 */
static inline u_int clk_cnt_sample(
	struct refclockproc * const	pp
	)
{
	u_int retv = pp->coderecv - pp->codeproc;
	if (retv > MAXSTAGE)
		retv += MAXSTAGE;	/* undo the unsigned wrap */
	return retv;
}
114 
115 #else
116 
117 static inline void clk_add_sample(
118 	struct refclockproc * const	pp,
119 	double				sv
120 	)
121 {
122 	pp->coderecv  = (pp->coderecv + 1) & (MAXSTAGE - 1);
123 	if (pp->coderecv == pp->codeproc)
124 		pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
125 	pp->filter[pp->coderecv] = sv;
126 }
127 
128 static inline double clk_pop_sample(
129 	struct refclockproc * const	pp
130 	)
131 {
132 	if (pp->coderecv == pp->codeproc)
133 		return 0; /* Maybe a NaN would be better? */
134 	pp->codeproc = (pp->codeproc + 1) & (MAXSTAGE - 1);
135 	return pp->filter[pp->codeproc];
136 }
137 
138 static inline u_int clk_cnt_sample(
139 	struct refclockproc * const	pp
140 	)
141 {
142 	return (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
143 }
144 
145 #endif
146 
147 /*
148  * refclock_report - note the occurrence of an event
149  *
150  * This routine presently just remembers the report and logs it, but
151  * does nothing heroic for the trap handler. It tries to be a good
152  * citizen and bothers the system log only if things change.
153  */
154 void
155 refclock_report(
156 	struct peer *peer,
157 	int code
158 	)
159 {
160 	struct refclockproc *pp;
161 
162 	pp = peer->procptr;
163 	if (pp == NULL)
164 		return;
165 
166 	switch (code) {
167 
168 	case CEVNT_TIMEOUT:
169 		pp->noreply++;
170 		break;
171 
172 	case CEVNT_BADREPLY:
173 		pp->badformat++;
174 		break;
175 
176 	case CEVNT_FAULT:
177 		break;
178 
179 	case CEVNT_BADDATE:
180 	case CEVNT_BADTIME:
181 		pp->baddata++;
182 		break;
183 
184 	default:
185 		/* ignore others */
186 		break;
187 	}
188 	if ((code != CEVNT_NOMINAL) && (pp->lastevent < 15))
189 		pp->lastevent++;
190 	if (pp->currentstatus != code) {
191 		pp->currentstatus = (u_char)code;
192 		report_event(PEVNT_CLOCK, peer, ceventstr(code));
193 	}
194 }
195 
196 
197 /*
198  * init_refclock - initialize the reference clock drivers
199  *
200  * This routine calls each of the drivers in turn to initialize internal
201  * variables, if necessary. Most drivers have nothing to say at this
202  * point.
203  */
204 void
205 init_refclock(void)
206 {
207 	int i;
208 
209 	for (i = 0; i < (int)num_refclock_conf; i++)
210 		if (refclock_conf[i]->clock_init != noentry)
211 			(refclock_conf[i]->clock_init)();
212 }
213 
214 
215 /*
216  * refclock_newpeer - initialize and start a reference clock
217  *
218  * This routine allocates and initializes the interface structure which
219  * supports a reference clock in the form of an ordinary NTP peer. A
220  * driver-specific support routine completes the initialization, if
221  * used. Default peer variables which identify the clock and establish
222  * its reference ID and stratum are set here. It returns one if success
223  * and zero if the clock address is invalid or already running,
224  * insufficient resources are available or the driver declares a bum
225  * rap.
226  */
int
refclock_newpeer(
	struct peer *peer	/* peer structure pointer */
	)
{
	struct refclockproc *pp;
	u_char clktype;
	int unit;

	/*
	 * Check for valid clock address. If already running, shut it
	 * down first.
	 */
	if (!ISREFCLOCKADR(&peer->srcadr)) {
		msyslog(LOG_ERR,
			"refclock_newpeer: clock address %s invalid",
			stoa(&peer->srcadr));
		return (0);
	}
	clktype = (u_char)REFCLOCKTYPE(&peer->srcadr);
	unit = REFCLOCKUNIT(&peer->srcadr);
	/* The type must index a configured driver with a start routine. */
	if (clktype >= num_refclock_conf ||
		refclock_conf[clktype]->clock_start == noentry) {
		msyslog(LOG_ERR,
			"refclock_newpeer: clock type %d invalid\n",
			clktype);
		return (0);
	}

	/*
	 * Allocate and initialize interface structure
	 */
	pp = emalloc_zero(sizeof(*pp));
	peer->procptr = pp;

	/*
	 * Initialize structures.  The clock stays LEAP_NOTINSYNC until
	 * the driver delivers its first valid sample.
	 */
	peer->refclktype = clktype;
	peer->refclkunit = (u_char)unit;
	peer->flags |= FLAG_REFCLOCK;
	peer->leap = LEAP_NOTINSYNC;
	peer->stratum = STRATUM_REFCLOCK;
	peer->ppoll = peer->maxpoll;
	pp->type = clktype;
	pp->conf = refclock_conf[clktype];
	pp->timestarted = current_time;	/* when this association began */
	pp->io.fd = -1;			/* no device opened yet */

	/*
	 * Set peer.pmode based on the hmode. For appearances only.
	 */
	switch (peer->hmode) {
	case MODE_ACTIVE:
		peer->pmode = MODE_PASSIVE;
		break;

	default:
		peer->pmode = MODE_SERVER;
		break;
	}

	/*
	 * Do driver dependent initialization. The above defaults
	 * can be wiggled, then finish up for consistency.
	 */
	if (!((refclock_conf[clktype]->clock_start)(unit, peer))) {
		/* driver declined -- release everything set up above */
		refclock_unpeer(peer);
		return (0);
	}
	peer->refid = pp->refid;	/* adopt the driver-chosen refid */
	return (1);
}
300 
301 
302 /*
303  * refclock_unpeer - shut down a clock
304  */
305 void
306 refclock_unpeer(
307 	struct peer *peer	/* peer structure pointer */
308 	)
309 {
310 	u_char clktype;
311 	int unit;
312 
313 	/*
314 	 * Wiggle the driver to release its resources, then give back
315 	 * the interface structure.
316 	 */
317 	if (NULL == peer->procptr)
318 		return;
319 
320 	clktype = peer->refclktype;
321 	unit = peer->refclkunit;
322 	if (refclock_conf[clktype]->clock_shutdown != noentry)
323 		(refclock_conf[clktype]->clock_shutdown)(unit, peer);
324 	free(peer->procptr);
325 	peer->procptr = NULL;
326 }
327 
328 
329 /*
330  * refclock_timer - called once per second for housekeeping.
331  */
332 void
333 refclock_timer(
334 	struct peer *p
335 	)
336 {
337 	struct refclockproc *	pp;
338 	int			unit;
339 
340 	unit = p->refclkunit;
341 	pp = p->procptr;
342 	if (pp->conf->clock_timer != noentry)
343 		(*pp->conf->clock_timer)(unit, p);
344 	if (pp->action != NULL && pp->nextaction <= current_time)
345 		(*pp->action)(p);
346 }
347 
348 
349 /*
350  * refclock_transmit - simulate the transmit procedure
351  *
352  * This routine implements the NTP transmit procedure for a reference
353  * clock. This provides a mechanism to call the driver at the NTP poll
354  * interval, as well as provides a reachability mechanism to detect a
355  * broken radio or other madness.
356  */
void
refclock_transmit(
	struct peer *peer	/* peer structure pointer */
	)
{
	u_char clktype;
	int unit;

	clktype = peer->refclktype;
	unit = peer->refclkunit;
	peer->sent++;
	get_systime(&peer->xmt);	/* stamp the "transmit" time */

	/*
	 * This is a ripoff of the peer transmit routine, but
	 * specialized for reference clocks. We do a little less
	 * protocol here and call the driver-specific transmit routine.
	 */
	if (peer->burst == 0) {
		u_char oreach;
#ifdef DEBUG
		if (debug)
			printf("refclock_transmit: at %ld %s\n",
			    current_time, stoa(&(peer->srcadr)));
#endif

		/*
		 * Update reachability and poll variables like the
		 * network code.  oreach masks off the bit about to be
		 * shifted out so we can tell whether the peer was
		 * reachable before this poll.
		 */
		oreach = peer->reach & 0xfe;
		peer->reach <<= 1;
		/* no reply in the last four polls: dim the filter */
		if (!(peer->reach & 0x0f))
			clock_filter(peer, 0., 0., MAXDISPERSE);
		peer->outdate = current_time;
		if (!peer->reach) {
			/* just became unreachable: report it once */
			if (oreach) {
				report_event(PEVNT_UNREACH, peer, NULL);
				peer->timereachable = current_time;
			}
		} else {
			/* reachable: start a burst if so configured */
			if (peer->flags & FLAG_BURST)
				peer->burst = NSTAGE;
		}
	} else {
		peer->burst--;
	}
	/* hand the actual poll to the driver, then resync poll timing */
	peer->procptr->inpoll = TRUE;
	if (refclock_conf[clktype]->clock_poll != noentry)
		(refclock_conf[clktype]->clock_poll)(unit, peer);
	poll_update(peer, peer->hpoll, 0);
}
409 
410 
411 /*
412  * Compare two doubles - used with qsort()
413  */
/*
 * Three-way comparison of two doubles - used with qsort().
 * Returns -1, 0 or 1 as *p1 is less than, equal to or greater
 * than *p2 (0 for unordered NaN comparisons, as before).
 */
static int
refclock_cmpl_fp(
	const void *p1,
	const void *p2
	)
{
	const double a = *(const double *)p1;
	const double b = *(const double *)p2;

	return (a > b) - (a < b);
}
429 
430 /*
431  * Get number of available samples
432  */
int
refclock_samples_avail(
	struct refclockproc const * pp
	)
{
	u_int	na;

#   if MAXSTAGE & (MAXSTAGE - 1)

	/* modulus variant: undo unsigned wrap when coderecv is behind */
	na = pp->coderecv - pp->codeproc;
	if (na > MAXSTAGE)
		na += MAXSTAGE;

#   else

	/* power-of-two variant: the mask folds the wrap automatically */
	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);

#   endif
	return na;
}
453 
454 /*
455  * Expire (remove) samples from the tail (oldest samples removed)
456  *
457  * Returns number of samples deleted
458  */
459 int
460 refclock_samples_expire(
461 	struct refclockproc * pp,
462 	int                   nd
463 	)
464 {
465 	u_int	na;
466 
467 	if (nd <= 0)
468 		return 0;
469 
470 #   if MAXSTAGE & (MAXSTAGE - 1)
471 
472 	na = pp->coderecv - pp->codeproc;
473 	if (na > MAXSTAGE)
474 		na += MAXSTAGE;
475 	if ((u_int)nd < na)
476 		nd = na;
477 	pp->codeproc = (pp->codeproc + nd) % MAXSTAGE;
478 
479 #   else
480 
481 	na = (pp->coderecv - pp->codeproc) & (MAXSTAGE - 1);
482 	if ((u_int)nd > na)
483 		nd = (int)na;
484 	pp->codeproc = (pp->codeproc + nd) & (MAXSTAGE - 1);
485 
486 #   endif
487 	return nd;
488 }
489 
490 /*
491  * refclock_process_offset - update median filter
492  *
493  * This routine uses the given offset and timestamps to construct a new
494  * entry in the median filter circular buffer. Samples that overflow the
495  * filter are quietly discarded.
496  */
497 void
498 refclock_process_offset(
499 	struct refclockproc *pp,	/* refclock structure pointer */
500 	l_fp lasttim,			/* last timecode timestamp */
501 	l_fp lastrec,			/* last receive timestamp */
502 	double fudge
503 	)
504 {
505 	l_fp lftemp;
506 	double doffset;
507 
508 	pp->lastrec = lastrec;
509 	lftemp = lasttim;
510 	L_SUB(&lftemp, &lastrec);
511 	LFPTOD(&lftemp, doffset);
512 	clk_add_sample(pp, doffset + fudge);
513 	refclock_checkburst(pp->io.srcclock, pp);
514 }
515 
516 
517 /*
518  * refclock_process - process a sample from the clock
519  * refclock_process_f - refclock_process with other than time1 fudge
520  *
521  * This routine converts the timecode in the form days, hours, minutes,
522  * seconds and milliseconds/microseconds to internal timestamp format,
523  * then constructs a new entry in the median filter circular buffer.
524  * Return success (1) if the data are correct and consistent with the
525  * conventional calendar.
526  *
527  * Important for PPS users: Normally, the pp->lastrec is set to the
528  * system time when the on-time character is received and the pp->year,
529  * ..., pp->second decoded and the seconds fraction pp->nsec in
530  * nanoseconds). When a PPS offset is available, pp->nsec is forced to
531  * zero and the fraction for pp->lastrec is set to the PPS offset.
532  */
int
refclock_process_f(
	struct refclockproc *pp,	/* refclock structure pointer */
	double fudge			/* fudge added to the offset */
	)
{
	l_fp offset, ltemp;

	/*
	 * Compute the timecode timestamp from the days, hours, minutes,
	 * seconds and milliseconds/microseconds of the timecode. Use
	 * clocktime() for the aggregate seconds and the msec/usec for
	 * the fraction, when present. Note that this code relies on the
	 * file system time for the years and does not use the years of
	 * the timecode.
	 */
	if (!clocktime(pp->day, pp->hour, pp->minute, pp->second, GMT,
		pp->lastrec.l_ui, &pp->yearstart, &offset.l_ui))
		return (0);	/* inconsistent date/time fields */

	/* fold the nanosecond part into the l_fp fraction */
	offset.l_uf = 0;
	DTOLFP(pp->nsec / 1e9, &ltemp);
	L_ADD(&offset, &ltemp);
	refclock_process_offset(pp, offset, pp->lastrec, fudge);
	return (1);
}
559 
560 
561 int
562 refclock_process(
563 	struct refclockproc *pp		/* refclock structure pointer */
564 )
565 {
566 	return refclock_process_f(pp, pp->fudgetime1);
567 }
568 
569 
570 /*
571  * refclock_sample - process a pile of samples from the clock
572  *
573  * This routine implements a recursive median filter to suppress spikes
574  * in the data, as well as determine a performance statistic. It
575  * calculates the mean offset and RMS jitter. A time adjustment
576  * fudgetime1 can be added to the final offset to compensate for various
577  * systematic errors. The routine returns the number of samples
578  * processed, which could be zero.
579  */
static int
refclock_sample(
	struct refclockproc *pp		/* refclock structure pointer */
	)
{
	size_t	i, j, k, m, n;
	double	off[MAXSTAGE];

	/*
	 * Copy the raw offsets and sort into ascending order. Don't do
	 * anything if the buffer is empty.
	 */
	n = 0;
	while (pp->codeproc != pp->coderecv)
		off[n++] = clk_pop_sample(pp);
	if (n == 0)
		return (0);

	if (n > 1)
		qsort(off, n, sizeof(off[0]), refclock_cmpl_fp);

	/*
	 * Reject the furthest from the median of the samples until
	 * approximately 60 percent of the samples remain.
	 *
	 * [Bug 3672] The elimination is now based on the proper
	 * definition of the median. The true median is not calculated
	 * directly, though.
	 *
	 * [i, j) brackets the surviving (sorted) samples; each pass
	 * drops whichever end lies further from the midpoint.
	 */
	i = 0; j = n;
	m = n - (n * 4) / 10;	/* target: keep ~60% of the samples */
	while ((k = j - i) > m) {
		k = (k - 1) >> 1;
		if ((off[j - 1] - off[j - k - 1]) < (off[i + k] - off[i]))
			i++;	/* reject low end */
		else
			j--;	/* reject high end */
	}

	/*
	 * Determine the offset and jitter.  The offset is the mean of
	 * the survivors; the jitter is the RMS of first differences.
	 */
	pp->offset = off[i];
	pp->jitter = 0;
	for (k = i + 1; k < j; k++) {
		pp->offset += off[k];
		pp->jitter += SQUARE(off[k] - off[k - 1]);
	}
	pp->offset /= m;
	m -= (m > 1);	/* only (m-1) terms attribute to jitter! */
	pp->jitter = max(SQRT(pp->jitter / m), LOGTOD(sys_precision));

	/*
	 * If the source has a jitter that cannot be estimated, because
	 * it is not statistic jitter, the source will be detected as
	 * falseticker sooner or later.  Enforcing a minimal jitter value
	 * avoids a too low estimation while still detecting higher jitter.
	 *
	 * Note that this changes the refclock samples and ends up in the
	 * clock dispersion, not the clock jitter, despite being called
	 * jitter.  To see the modified values, check the NTP clock variable
	 * "filtdisp", not "jitter".
	 */
	pp->jitter = max(pp->jitter, pp->fudgeminjitter);

#ifdef DEBUG
	if (debug)
		printf(
		    "refclock_sample: n %d offset %.6f disp %.6f jitter %.6f\n",
		    (int)n, pp->offset, pp->disp, pp->jitter);
#endif
	return (int)n;
}
653 
654 
655 /*
656  * refclock_receive - simulate the receive and packet procedures
657  *
658  * This routine simulates the NTP receive and packet procedures for a
659  * reference clock. This provides a mechanism in which the ordinary NTP
660  * filter, selection and combining algorithms can be used to suppress
661  * misbehaving radios and to mitigate between them when more than one is
662  * available for backup.
663  */
void
refclock_receive(
	struct peer *peer	/* peer structure pointer */
	)
{
	struct refclockproc *pp;

#ifdef DEBUG
	if (debug)
		printf("refclock_receive: at %lu %s\n",
		    current_time, stoa(&peer->srcadr));
#endif

	/*
	 * Do a little sanity dance and update the peer structure. Groom
	 * the median filter samples and give the data to the clock
	 * filter.  Nothing is usable until the driver reports a leap
	 * status other than LEAP_NOTINSYNC.
	 */
	pp = peer->procptr;
	pp->inpoll = FALSE;	/* the pending poll is answered */
	peer->leap = pp->leap;
	if (peer->leap == LEAP_NOTINSYNC)
		return;

	peer->received++;
	peer->timereceived = current_time;
	/* transition to reachable is an event worth reporting */
	if (!peer->reach) {
		report_event(PEVNT_REACH, peer, NULL);
		peer->timereachable = current_time;
	}
	peer->reach = (peer->reach << (peer->reach & 1)) | 1;
	peer->reftime = pp->lastref;
	peer->aorg = pp->lastrec;
	peer->rootdisp = pp->disp;
	get_systime(&peer->dst);
	/* fold the median-filter pile into offset/jitter; bail if empty */
	if (!refclock_sample(pp))
		return;

	clock_filter(peer, pp->offset, 0., pp->jitter);
	/*
	 * Optional fudge calibration: when a PPS source is the system
	 * peer, slowly steer this clock's time1 toward it.
	 */
	if (cal_enable && fabs(last_offset) < sys_mindisp && sys_peer !=
	    NULL) {
		if (sys_peer->refclktype == REFCLK_ATOM_PPS &&
		    peer->refclktype != REFCLK_ATOM_PPS)
			pp->fudgetime1 -= pp->offset * FUDGEFAC;
	}
}
710 
711 
712 /*
713  * refclock_gtlin - groom next input line and extract timestamp
714  *
715  * This routine processes the timecode received from the clock and
716  * strips the parity bit and control characters. It returns the number
717  * of characters in the line followed by a NULL character ('\0'), which
718  * is not included in the count. In case of an empty line, the previous
719  * line is preserved.
720  */
721 int
722 refclock_gtlin(
723 	struct recvbuf *rbufp,	/* receive buffer pointer */
724 	char	*lineptr,	/* current line pointer */
725 	int	bmax,		/* remaining characters in line */
726 	l_fp	*tsptr		/* pointer to timestamp returned */
727 	)
728 {
729 	const char *sp, *spend;
730 	char	   *dp, *dpend;
731 	int         dlen;
732 
733 	if (bmax <= 0)
734 		return (0);
735 
736 	dp    = lineptr;
737 	dpend = dp + bmax - 1; /* leave room for NUL pad */
738 	sp    = (const char *)rbufp->recv_buffer;
739 	spend = sp + rbufp->recv_length;
740 
741 	while (sp != spend && dp != dpend) {
742 		char c;
743 
744 		c = *sp++ & 0x7f;
745 		if (c >= 0x20 && c < 0x7f)
746 			*dp++ = c;
747 	}
748 	/* Get length of data written to the destination buffer. If
749 	 * zero, do *not* place a NUL byte to preserve the previous
750 	 * buffer content.
751 	 */
752 	dlen = dp - lineptr;
753 	if (dlen)
754 	    *dp  = '\0';
755 	*tsptr = rbufp->recv_time;
756 	DPRINTF(2, ("refclock_gtlin: fd %d time %s timecode %d %s\n",
757 		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), dlen,
758 		    (dlen != 0)
759 			? lineptr
760 			: ""));
761 	return (dlen);
762 }
763 
764 
765 /*
766  * refclock_gtraw - get next line/chunk of data
767  *
768  * This routine returns the raw data received from the clock in both
769  * canonical or raw modes. The terminal interface routines map CR to LF.
770  * In canonical mode this results in two lines, one containing data
771  * followed by LF and another containing only LF. In raw mode the
772  * interface routines can deliver arbitrary chunks of data from one
773  * character to a maximum specified by the calling routine. In either
774  * mode the routine returns the number of characters in the line
775  * followed by a NULL character ('\0'), which is not included in the
776  * count.
777  *
778  * *tsptr receives a copy of the buffer timestamp.
779  */
780 int
781 refclock_gtraw(
782 	struct recvbuf *rbufp,	/* receive buffer pointer */
783 	char	*lineptr,	/* current line pointer */
784 	int	bmax,		/* remaining characters in line */
785 	l_fp	*tsptr		/* pointer to timestamp returned */
786 	)
787 {
788 	if (bmax <= 0)
789 		return (0);
790 	bmax -= 1; /* leave room for trailing NUL */
791 	if (bmax > rbufp->recv_length)
792 		bmax = rbufp->recv_length;
793 	memcpy(lineptr, rbufp->recv_buffer, bmax);
794 	lineptr[bmax] = '\0';
795 
796 	*tsptr = rbufp->recv_time;
797 	DPRINTF(2, ("refclock_gtraw: fd %d time %s timecode %d %s\n",
798 		    rbufp->fd, ulfptoa(&rbufp->recv_time, 6), bmax,
799 		    lineptr));
800 	return (bmax);
801 }
802 
803 /*
804  * refclock_fdwrite()
805  *
806  * Write data to a clock device. Does the necessary result checks and
807  * logging, and encapsulates OS dependencies.
808  */
809 #ifdef SYS_WINNT
810 extern int async_write(int fd, const void * buf, unsigned int len);
811 #endif
812 
size_t
refclock_fdwrite(
	const struct peer *	peer,
	int			fd,
	const void *		buf,
	size_t			len,
	const char *		what
	)
{
	size_t	nret, nout;
	int	nerr;

	/* clamp the write size to INT_MAX for the OS write primitive */
	nout = (INT_MAX > len) ? len : INT_MAX;
#   ifdef SYS_WINNT
	nret = (size_t)async_write(fd, buf, (unsigned int)nout);
#   else
	nret = (size_t)write(fd, buf, nout);
#   endif
	/* 'what' == NULL means the caller wants no logging at all */
	if (NULL != what) {
		if (nret == FDWRITE_ERROR) {
			/* preserve errno across msyslog for the caller */
			nerr = errno;
			msyslog(LOG_INFO,
				"%s: write %s failed, fd=%d, %m",
				refnumtoa(&peer->srcadr), what,
				fd);
			errno = nerr;
		} else if (nret != len) {
			/* short write: note it, but still return nret */
			nerr = errno;
			msyslog(LOG_NOTICE,
				"%s: %s shortened, fd=%d, wrote %u of %u bytes",
				refnumtoa(&peer->srcadr), what,
				fd, (u_int)nret, (u_int)len);
			errno = nerr;
		}
	}
	return nret;
}
850 
851 size_t
852 refclock_write(
853 	const struct peer *	peer,
854 	const void *		buf,
855 	size_t			len,
856 	const char *		what
857 	)
858 {
859 	if ( ! (peer && peer->procptr)) {
860 		if (NULL != what)
861 			msyslog(LOG_INFO,
862 				"%s: write %s failed, invalid clock peer",
863 				refnumtoa(&peer->srcadr), what);
864 		errno = EINVAL;
865 		return FDWRITE_ERROR;
866 	}
867 	return refclock_fdwrite(peer, peer->procptr->io.fd,
868 				buf, len, what);
869 }
870 
871 /*
872  * indicate_refclock_packet()
873  *
874  * Passes a fragment of refclock input read from the device to the
875  * driver direct input routine, which may consume it (batch it for
876  * queuing once a logical unit is assembled).  If it is not so
877  * consumed, queue it for the driver's receive entrypoint.
878  *
879  * The return value is TRUE if the data has been consumed as a fragment
880  * and should not be counted as a received packet.
881  */
882 int
883 indicate_refclock_packet(
884 	struct refclockio *	rio,
885 	struct recvbuf *	rb
886 	)
887 {
888 	/* Does this refclock use direct input routine? */
889 	if (rio->io_input != NULL && (*rio->io_input)(rb) == 0) {
890 		/*
891 		 * data was consumed - nothing to pass up
892 		 * into block input machine
893 		 */
894 		freerecvbuf(rb);
895 
896 		return TRUE;
897 	}
898 	add_full_recv_buffer(rb);
899 
900 	return FALSE;
901 }
902 
903 
904 /*
905  * process_refclock_packet()
906  *
907  * Used for deferred processing of 'io_input' on systems where threading
908  * is used (notably Windows). This is acting as a trampoline to make the
909  * real calls to the refclock functions.
910  */
911 #ifdef HAVE_IO_COMPLETION_PORT
912 void
913 process_refclock_packet(
914 	struct recvbuf * rb
915 	)
916 {
917 	struct refclockio * rio;
918 
919 	/* get the refclockio structure from the receive buffer */
920 	rio  = &rb->recv_peer->procptr->io;
921 
922 	/* call 'clock_recv' if either there is no input function or the
923 	 * raw input function tells us to feed the packet to the
924 	 * receiver.
925 	 */
926 	if (rio->io_input == NULL || (*rio->io_input)(rb) != 0) {
927 		rio->recvcount++;
928 		packets_received++;
929 		handler_pkts++;
930 		(*rio->clock_recv)(rb);
931 	}
932 }
933 #endif	/* HAVE_IO_COMPLETION_PORT */
934 
935 
936 /*
937  * The following code does not apply to WINNT & VMS ...
938  */
939 #if !defined(SYS_VXWORKS) && !defined(SYS_WINNT)
940 #if defined(HAVE_TERMIOS) || defined(HAVE_SYSV_TTYS) || defined(HAVE_BSD_TTYS)
941 
942 /*
943  * refclock_open - open serial port for reference clock
944  *
945  * This routine opens a serial port for I/O and sets default options. It
946  * returns the file descriptor if successful, or logs an error and
947  * returns -1.
948  */
int
refclock_open(
	const sockaddr_u *srcadr,
 	const char	*dev,	/* device name pointer */
	u_int		speed,	/* serial port speed (code) */
	u_int		lflags	/* line discipline flags */
	)
{
	const char *cdev;
	int	fd;
	int	omode;
#ifdef O_NONBLOCK
	char	trash[128];	/* litter bin for old input data */
#endif

	/*
	 * Open serial port and set default options
	 */
	omode = O_RDWR;
#ifdef O_NONBLOCK
	omode |= O_NONBLOCK;
#endif
#ifdef O_NOCTTY
	omode |= O_NOCTTY;	/* don't become the controlling terminal */
#endif

	/* a 'device' entry in the config overrides the default name */
	if (NULL != (cdev = clockdev_lookup(srcadr, 0)))
		dev = cdev;

	fd = open(dev, omode, 0777);
	/* refclock_open() long returned 0 on failure, avoid it. */
	if (0 == fd) {
		/* move the descriptor off fd 0 so a valid open never
		 * collides with the historical failure value
		 */
		fd = dup(0);
		SAVE_ERRNO(
			close(0);
		)
	}
	if (fd < 0) {
		SAVE_ERRNO(
			msyslog(LOG_ERR, "refclock_open %s: %m", dev);
		)
		return -1;
	}
	if (!refclock_setup(fd, speed, lflags)) {
		close(fd);
		return -1;
	}
	if (!refclock_ioctl(fd, lflags)) {
		close(fd);
		return -1;
	}
	msyslog(LOG_NOTICE, "%s serial %s open at %d bps",
		refnumtoa(srcadr), dev, symBaud2numBaud(speed));

#ifdef O_NONBLOCK
	/*
	 * We want to make sure there is no pending trash in the input
	 * buffer. Since we have non-blocking IO available, this is a
	 * good moment to read and dump all available outdated stuff
	 * that might have become toxic for the driver.
	 */
	while (read(fd, trash, sizeof(trash)) > 0 || errno == EINTR)
		/*NOP*/;
#endif
	return fd;
}
1015 
1016 
1017 /*
1018  * refclock_setup - initialize terminal interface structure
1019  */
1020 int
1021 refclock_setup(
1022 	int	fd,		/* file descriptor */
1023 	u_int	speed,		/* serial port speed (code) */
1024 	u_int	lflags		/* line discipline flags */
1025 	)
1026 {
1027 	int	i;
1028 	TTY	ttyb, *ttyp;
1029 
1030 	/*
1031 	 * By default, the serial line port is initialized in canonical
1032 	 * (line-oriented) mode at specified line speed, 8 bits and no
1033 	 * parity. LF ends the line and CR is mapped to LF. The break,
1034 	 * erase and kill functions are disabled. There is a different
1035 	 * section for each terminal interface, as selected at compile
1036 	 * time. The flag bits can be used to set raw mode and echo.
1037 	 */
1038 	ttyp = &ttyb;
1039 #ifdef HAVE_TERMIOS
1040 
1041 	/*
1042 	 * POSIX serial line parameters (termios interface)
1043 	 */
1044 	if (tcgetattr(fd, ttyp) < 0) {
1045 		SAVE_ERRNO(
1046 			msyslog(LOG_ERR,
1047 				"refclock_setup fd %d tcgetattr: %m",
1048 				fd);
1049 		)
1050 		return FALSE;
1051 	}
1052 
1053 	/*
1054 	 * Set canonical mode and local connection; set specified speed,
1055 	 * 8 bits and no parity; map CR to NL; ignore break.
1056 	 */
1057 	if (speed) {
1058 		u_int	ltemp = 0;
1059 
1060 		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
1061 		ttyp->c_oflag = 0;
1062 		ttyp->c_cflag = CS8 | CLOCAL | CREAD;
1063 		if (lflags & LDISC_7O1) {
1064 			/* HP Z3801A needs 7-bit, odd parity */
1065 			ttyp->c_cflag = CS7 | PARENB | PARODD | CLOCAL | CREAD;
1066 		}
1067 		cfsetispeed(&ttyb, speed);
1068 		cfsetospeed(&ttyb, speed);
1069 		for (i = 0; i < NCCS; ++i)
1070 			ttyp->c_cc[i] = '\0';
1071 
1072 #if defined(TIOCMGET) && !defined(SCO5_CLOCK)
1073 
1074 		/*
1075 		 * If we have modem control, check to see if modem leads
1076 		 * are active; if so, set remote connection. This is
1077 		 * necessary for the kernel pps mods to work.
1078 		 */
1079 		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
1080 			msyslog(LOG_ERR,
1081 			    "refclock_setup fd %d TIOCMGET: %m", fd);
1082 #ifdef DEBUG
1083 		if (debug)
1084 			printf("refclock_setup fd %d modem status: 0x%x\n",
1085 			    fd, ltemp);
1086 #endif
1087 		if (ltemp & TIOCM_DSR && lflags & LDISC_REMOTE)
1088 			ttyp->c_cflag &= ~CLOCAL;
1089 #endif /* TIOCMGET */
1090 	}
1091 
1092 	/*
1093 	 * Set raw and echo modes. These can be changed on-fly.
1094 	 */
1095 	ttyp->c_lflag = ICANON;
1096 	if (lflags & LDISC_RAW) {
1097 		ttyp->c_lflag = 0;
1098 		ttyp->c_iflag = 0;
1099 		ttyp->c_cc[VMIN] = 1;
1100 	}
1101 	if (lflags & LDISC_ECHO)
1102 		ttyp->c_lflag |= ECHO;
1103 	if (tcsetattr(fd, TCSANOW, ttyp) < 0) {
1104 		SAVE_ERRNO(
1105 			msyslog(LOG_ERR,
1106 				"refclock_setup fd %d TCSANOW: %m",
1107 				fd);
1108 		)
1109 		return FALSE;
1110 	}
1111 
1112 	/*
1113 	 * flush input and output buffers to discard any outdated stuff
1114 	 * that might have become toxic for the driver. Failing to do so
1115 	 * is logged, but we keep our fingers crossed otherwise.
1116 	 */
1117 	if (tcflush(fd, TCIOFLUSH) < 0)
1118 		msyslog(LOG_ERR, "refclock_setup fd %d tcflush(): %m",
1119 			fd);
1120 #endif /* HAVE_TERMIOS */
1121 
1122 #ifdef HAVE_SYSV_TTYS
1123 
1124 	/*
1125 	 * System V serial line parameters (termio interface)
1126 	 *
1127 	 */
1128 	if (ioctl(fd, TCGETA, ttyp) < 0) {
1129 		SAVE_ERRNO(
1130 			msyslog(LOG_ERR,
1131 				"refclock_setup fd %d TCGETA: %m",
1132 				fd);
1133 		)
1134 		return FALSE;
1135 	}
1136 
1137 	/*
1138 	 * Set canonical mode and local connection; set specified speed,
1139 	 * 8 bits and no parity; map CR to NL; ignore break.
1140 	 */
1141 	if (speed) {
1142 		u_int	ltemp = 0;
1143 
1144 		ttyp->c_iflag = IGNBRK | IGNPAR | ICRNL;
1145 		ttyp->c_oflag = 0;
1146 		ttyp->c_cflag = speed | CS8 | CLOCAL | CREAD;
1147 		for (i = 0; i < NCCS; ++i)
1148 			ttyp->c_cc[i] = '\0';
1149 
1150 #if defined(TIOCMGET) && !defined(SCO5_CLOCK)
1151 
1152 		/*
1153 		 * If we have modem control, check to see if modem leads
1154 		 * are active; if so, set remote connection. This is
1155 		 * necessary for the kernel pps mods to work.
1156 		 */
1157 		if (ioctl(fd, TIOCMGET, (char *)&ltemp) < 0)
1158 			msyslog(LOG_ERR,
1159 			    "refclock_setup fd %d TIOCMGET: %m", fd);
1160 #ifdef DEBUG
1161 		if (debug)
1162 			printf("refclock_setup fd %d modem status: %x\n",
1163 			    fd, ltemp);
1164 #endif
1165 		if (ltemp & TIOCM_DSR)
1166 			ttyp->c_cflag &= ~CLOCAL;
1167 #endif /* TIOCMGET */
1168 	}
1169 
1170 	/*
1171 	 * Set raw and echo modes. These can be changed on-fly.
1172 	 */
1173 	ttyp->c_lflag = ICANON;
1174 	if (lflags & LDISC_RAW) {
1175 		ttyp->c_lflag = 0;
1176 		ttyp->c_iflag = 0;
1177 		ttyp->c_cc[VMIN] = 1;
1178 	}
1179 	if (ioctl(fd, TCSETA, ttyp) < 0) {
1180 		SAVE_ERRNO(
1181 			msyslog(LOG_ERR,
1182 				"refclock_setup fd %d TCSETA: %m", fd);
1183 		)
1184 		return FALSE;
1185 	}
1186 #endif /* HAVE_SYSV_TTYS */
1187 
1188 #ifdef HAVE_BSD_TTYS
1189 
1190 	/*
1191 	 * 4.3bsd serial line parameters (sgttyb interface)
1192 	 */
1193 	if (ioctl(fd, TIOCGETP, (char *)ttyp) < 0) {
1194 		SAVE_ERRNO(
1195 			msyslog(LOG_ERR,
1196 				"refclock_setup fd %d TIOCGETP: %m",
1197 				fd);
1198 		)
1199 		return FALSE;
1200 	}
1201 	if (speed)
1202 		ttyp->sg_ispeed = ttyp->sg_ospeed = speed;
1203 	ttyp->sg_flags = EVENP | ODDP | CRMOD;
1204 	if (ioctl(fd, TIOCSETP, (char *)ttyp) < 0) {
1205 		SAVE_ERRNO(
1206 			msyslog(LOG_ERR, "refclock_setup TIOCSETP: %m");
1207 		)
1208 		return FALSE;
1209 	}
1210 #endif /* HAVE_BSD_TTYS */
1211 	return(1);
1212 }
1213 #endif /* HAVE_TERMIOS || HAVE_SYSV_TTYS || HAVE_BSD_TTYS */
1214 
1215 
1216 /*
1217  * refclock_ioctl - set serial port control functions
1218  *
1219  * This routine attempts to hide the internal, system-specific details
1220  * of serial ports. It can handle POSIX (termios), SYSV (termio) and BSD
1221  * (sgtty) interfaces with varying degrees of success. The routine sets
1222  * up optional features such as tty_clk. The routine returns TRUE if
1223  * successful.
1224  */
1225 int
1226 refclock_ioctl(
1227 	int	fd, 		/* file descriptor */
1228 	u_int	lflags		/* line discipline flags */
1229 	)
1230 {
1231 	/*
1232 	 * simply return TRUE if no UNIX line discipline is supported
1233 	 */
1234 	DPRINTF(1, ("refclock_ioctl: fd %d flags 0x%x\n", fd, lflags));
1235 
1236 	return TRUE;
1237 }
1238 #endif /* !defined(SYS_VXWORKS) && !defined(SYS_WINNT) */
1239 
1240 
1241 /*
1242  * refclock_control - set and/or return clock values
1243  *
1244  * This routine is used mainly for debugging. It returns designated
1245  * values from the interface structure that can be displayed using
1246  * ntpdc and the clockstat command. It can also be used to initialize
1247  * configuration variables, such as fudgetimes, fudgevalues, reference
1248  * ID and stratum.
1249  */
void
refclock_control(
	sockaddr_u *srcadr,		/* refclock address */
	const struct refclockstat *in,	/* values to install, or NULL */
	struct refclockstat *out	/* readback buffer, or NULL */
	)
{
	struct peer *peer;
	struct refclockproc *pp;
	u_char clktype;			/* driver type from the address */
	int unit;			/* unit number from the address */

	/*
	 * Check for valid address and running peer
	 */
	if (!ISREFCLOCKADR(srcadr))
		return;

	clktype = (u_char)REFCLOCKTYPE(srcadr);
	unit = REFCLOCKUNIT(srcadr);

	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);

	if (NULL == peer)
		return;

	INSIST(peer->procptr != NULL);
	pp = peer->procptr;

	/*
	 * Initialize requested data: only items flagged present in
	 * in->haveflags are copied into the clockproc; stratum and
	 * refid are mirrored into the peer as well.
	 */
	if (in != NULL) {
		if (in->haveflags & CLK_HAVETIME1)
			pp->fudgetime1 = in->fudgetime1;
		if (in->haveflags & CLK_HAVETIME2)
			pp->fudgetime2 = in->fudgetime2;
		if (in->haveflags & CLK_HAVEVAL1)
			peer->stratum = pp->stratum = (u_char)in->fudgeval1;
		if (in->haveflags & CLK_HAVEVAL2)
			peer->refid = pp->refid = in->fudgeval2;
		/* flags 1-4 are replaced individually, not wholesale */
		if (in->haveflags & CLK_HAVEFLAG1) {
			pp->sloppyclockflag &= ~CLK_FLAG1;
			pp->sloppyclockflag |= in->flags & CLK_FLAG1;
		}
		if (in->haveflags & CLK_HAVEFLAG2) {
			pp->sloppyclockflag &= ~CLK_FLAG2;
			pp->sloppyclockflag |= in->flags & CLK_FLAG2;
		}
		if (in->haveflags & CLK_HAVEFLAG3) {
			pp->sloppyclockflag &= ~CLK_FLAG3;
			pp->sloppyclockflag |= in->flags & CLK_FLAG3;
		}
		if (in->haveflags & CLK_HAVEFLAG4) {
			pp->sloppyclockflag &= ~CLK_FLAG4;
			pp->sloppyclockflag |= in->flags & CLK_FLAG4;
		}
		if (in->haveflags & CLK_HAVEMINJIT)
			pp->fudgeminjitter = in->fudgeminjitter;
	}

	/*
	 * Readback requested data. Note that fudge times, flags and
	 * the minimum jitter are advertised in out->haveflags only
	 * when they are nonzero.
	 */
	if (out != NULL) {
		out->fudgeval1 = pp->stratum;
		out->fudgeval2 = pp->refid;
		out->haveflags = CLK_HAVEVAL1 | CLK_HAVEVAL2;
		out->fudgetime1 = pp->fudgetime1;
		if (0.0 != out->fudgetime1)
			out->haveflags |= CLK_HAVETIME1;
		out->fudgetime2 = pp->fudgetime2;
		if (0.0 != out->fudgetime2)
			out->haveflags |= CLK_HAVETIME2;
		out->flags = (u_char) pp->sloppyclockflag;
		if (CLK_FLAG1 & out->flags)
			out->haveflags |= CLK_HAVEFLAG1;
		if (CLK_FLAG2 & out->flags)
			out->haveflags |= CLK_HAVEFLAG2;
		if (CLK_FLAG3 & out->flags)
			out->haveflags |= CLK_HAVEFLAG3;
		if (CLK_FLAG4 & out->flags)
			out->haveflags |= CLK_HAVEFLAG4;
		out->fudgeminjitter = pp->fudgeminjitter;
		if (0.0 != out->fudgeminjitter)
			out->haveflags |= CLK_HAVEMINJIT;

		/* statistics counters since the association started */
		out->timereset = current_time - pp->timestarted;
		out->polls = pp->polls;
		out->noresponse = pp->noreply;
		out->badformat = pp->badformat;
		out->baddata = pp->baddata;

		out->lastevent = pp->lastevent;
		out->currentstatus = pp->currentstatus;
		out->type = pp->type;
		out->clockdesc = pp->clockdesc;
		out->lencode = (u_short)pp->lencode;
		out->p_lastcode = pp->a_lastcode;
	}

	/*
	 * Give the stuff to the clock: the driver's own control entry,
	 * if any, gets a chance to act on (or extend) the settings.
	 */
	if (refclock_conf[clktype]->clock_control != noentry)
		(refclock_conf[clktype]->clock_control)(unit, in, out, peer);
}
1357 
1358 
1359 /*
1360  * refclock_buginfo - return debugging info
1361  *
1362  * This routine is used mainly for debugging. It returns designated
1363  * values from the interface structure that can be displayed using
1364  * ntpdc and the clkbug command.
1365  */
void
refclock_buginfo(
	sockaddr_u *srcadr,	/* clock address */
	struct refclockbug *bug /* output structure */
	)
{
	struct peer *peer;
	struct refclockproc *pp;
	int clktype;
	int unit;
	unsigned u;

	/*
	 * Check for valid address and peer structure
	 */
	if (!ISREFCLOCKADR(srcadr))
		return;

	clktype = (u_char) REFCLOCKTYPE(srcadr);
	unit = REFCLOCKUNIT(srcadr);

	peer = findexistingpeer(srcadr, NULL, NULL, -1, 0, NULL);

	if (NULL == peer || NULL == peer->procptr)
		return;

	pp = peer->procptr;

	/*
	 * Copy structure values: the decoded timecode fields plus some
	 * driver bookkeeping. svalues/stimes are per-entry bit masks
	 * (their meaning is defined by the ntpdc clkbug protocol).
	 */
	bug->nvalues = 8;
	bug->svalues = 0x0000003f;
	bug->values[0] = pp->year;
	bug->values[1] = pp->day;
	bug->values[2] = pp->hour;
	bug->values[3] = pp->minute;
	bug->values[4] = pp->second;
	bug->values[5] = pp->nsec;
	bug->values[6] = pp->yearstart;
	bug->values[7] = pp->coderecv;
	bug->stimes = 0xfffffffc;
	bug->times[0] = pp->lastref;
	bug->times[1] = pp->lastrec;
	/* NOTE(review): bug->ntimes is read but never written here; it
	 * is presumably initialized by the caller before this call --
	 * confirm. Filter samples fill times[2..ntimes-1].
	 */
	for (u = 2; u < bug->ntimes; u++)
		DTOLFP(pp->filter[u - 2], &bug->times[u]);

	/*
	 * Give the stuff to the clock so the driver can append its own
	 * debugging data.
	 */
	if (refclock_conf[clktype]->clock_buginfo != noentry)
		(refclock_conf[clktype]->clock_buginfo)(unit, bug, peer);
}
1419 
1420 
1421 #ifdef HAVE_PPSAPI
1422 /*
1423  * refclock_ppsapi - initialize/update ppsapi
1424  *
1425  * This routine is called after the fudge command to open the PPSAPI
1426  * interface for later parameter setting after the fudge command.
1427  */
1428 int
1429 refclock_ppsapi(
1430 	int	fddev,			/* fd device */
1431 	struct refclock_atom *ap	/* atom structure pointer */
1432 	)
1433 {
1434 	if (ap->handle == 0) {
1435 		if (time_pps_create(fddev, &ap->handle) < 0) {
1436 			msyslog(LOG_ERR,
1437 			    "refclock_ppsapi: time_pps_create: %m");
1438 			return (0);
1439 		}
1440 		ZERO(ap->ts); /* [Bug 2689] defined INIT state */
1441 	}
1442 	return (1);
1443 }
1444 
1445 
1446 /*
1447  * refclock_params - set ppsapi parameters
1448  *
1449  * This routine is called to set the PPSAPI parameters after the fudge
1450  * command.
1451  */
1452 int
1453 refclock_params(
1454 	int	mode,			/* mode bits */
1455 	struct refclock_atom *ap	/* atom structure pointer */
1456 	)
1457 {
1458 	ZERO(ap->pps_params);
1459 	ap->pps_params.api_version = PPS_API_VERS_1;
1460 
1461 	/*
1462 	 * Solaris serial ports provide PPS pulse capture only on the
1463 	 * assert edge. FreeBSD serial ports provide capture on the
1464 	 * clear edge, while FreeBSD parallel ports provide capture
1465 	 * on the assert edge. Your mileage may vary.
1466 	 */
1467 	if (mode & CLK_FLAG2)
1468 		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTURECLEAR;
1469 	else
1470 		ap->pps_params.mode = PPS_TSFMT_TSPEC | PPS_CAPTUREASSERT;
1471 	if (time_pps_setparams(ap->handle, &ap->pps_params) < 0) {
1472 		msyslog(LOG_ERR,
1473 		    "refclock_params: time_pps_setparams: %m");
1474 		return (0);
1475 	}
1476 
1477 	/*
1478 	 * If flag3 is lit, select the kernel PPS if we can.
1479 	 *
1480 	 * Note: EOPNOTSUPP is the only 'legal' error code we deal with;
1481 	 * it is part of the 'if we can' strategy.  Any other error
1482 	 * indicates something more sinister and makes this function fail.
1483 	 */
1484 	if (mode & CLK_FLAG3) {
1485 		if (time_pps_kcbind(ap->handle, PPS_KC_HARDPPS,
1486 		    ap->pps_params.mode & ~PPS_TSFMT_TSPEC,
1487 		    PPS_TSFMT_TSPEC) < 0)
1488 		{
1489 			if (errno != EOPNOTSUPP) {
1490 				msyslog(LOG_ERR,
1491 					"refclock_params: time_pps_kcbind: %m");
1492 				return (0);
1493 			}
1494 		} else {
1495 			hardpps_enable = 1;
1496 		}
1497 	}
1498 	return (1);
1499 }
1500 
1501 
1502 /*
1503  * refclock_pps - called once per second
1504  *
1505  * This routine is called once per second. It snatches the PPS
1506  * timestamp from the kernel and saves the sign-extended fraction in
1507  * a circular buffer for processing at the next poll event.
1508  */
int
refclock_pps(
	struct peer *peer,		/* peer structure pointer */
	struct refclock_atom *ap,	/* atom structure pointer */
	int	mode			/* mode bits (currently unused) */
	)
{
	struct refclockproc *pp;
	pps_info_t pps_info;
	struct timespec timeout;
	double	dtemp, dcorr, trash;

	/*
	 * We require the clock to be synchronized before setting the
	 * parameters. When the parameters have been set, fetch the
	 * most recent PPS timestamp.
	 */
	pp = peer->procptr;
	if (ap->handle == 0)
		return (0);

	if (ap->pps_params.mode == 0 && sys_leap != LEAP_NOTINSYNC) {
		if (refclock_params(pp->sloppyclockflag, ap) < 1)
			return (0);
	}
	ZERO(timeout);
	ZERO(pps_info);
	/* zero timeout: poll the latest sample without blocking */
	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC, &pps_info,
	    &timeout) < 0) {
		refclock_report(peer, CEVNT_FAULT);
		return (0);
	}
	timeout = ap->ts;	/* save old timestamp for check */
	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
		ap->ts = pps_info.assert_timestamp;
	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
		ap->ts = pps_info.clear_timestamp;
	else
		return (0);

	/* [Bug 2689] Discard the first sample we read -- if the PPS
	 * source is currently down / disconnected, we have read a
	 * potentially *very* stale value here. So if our old TS value
	 * is all-zero, we consider this sample unreliable and drop it.
	 *
	 * Note 1: a better check would compare the PPS time stamp to
	 * the current system time and drop it if it's more than say 3s
	 * away.
	 *
	 * Note 2: If we ever again get an all-zero PPS sample, the next
	 * one will be discarded. This can happen every 136yrs and is
	 * unlikely to be ever observed.
	 */
	if (0 == (timeout.tv_sec | timeout.tv_nsec))
		return (0);

	/* If the PPS source fails to deliver a new sample between
	 * polls, it regurgitates the last sample. We do not want to
	 * process the same sample multiple times.
	 */
	if (0 == memcmp(&timeout, &ap->ts, sizeof(timeout)))
		return (0);

	/*
	 * Convert to signed fraction offset, apply fudge and properly
	 * fold the correction into the [-0.5s,0.5s] range. Handle
	 * excessive fudge times, too.
	 */
	dtemp = ap->ts.tv_nsec / 1e9;	/* sub-second phase of the pulse */
	dcorr = modf((pp->fudgetime1 - dtemp), &trash);
	if (dcorr > 0.5)
		dcorr -= 1.0;
	else if (dcorr < -0.5)
		dcorr += 1.0;

	/* phase gate check: avoid wobbling by +/-1s when too close to
	 * the switch-over point. We allow +/-400ms max phase deviation.
	 * The trade-off is clear: The smaller the limit, the less
	 * sensitive to sampling noise the clock becomes. OTOH the
	 * system must get into phase gate range by other means for the
	 * PPS clock to lock in.
	 */
	if (fabs(dcorr) > 0.4)
		return (0);

	/*
	 * record this time stamp and stuff in median filter.
	 * JAN_1970 shifts the UNIX epoch seconds into the NTP era.
	 */
	pp->lastrec.l_ui = (u_int32)ap->ts.tv_sec + JAN_1970;
	pp->lastrec.l_uf = (u_int32)(dtemp * FRAC);
	clk_add_sample(pp, dcorr);
	refclock_checkburst(peer, pp);

#ifdef DEBUG
	if (debug > 1)
		printf("refclock_pps: %lu %f %f\n", current_time,
		    dcorr, pp->fudgetime1);
#endif
	return (1);
}
1609 #endif /* HAVE_PPSAPI */
1610 
1611 
1612 /*
1613  * -------------------------------------------------------------------
1614  * refclock_ppsaugment(...) -- correlate with PPS edge
1615  *
1616  * This function is used to correlate a receive time stamp with a PPS
1617  * edge time stamp. It applies the necessary fudges and then tries to
1618  * move the receive time stamp to the corresponding edge. This can warp
1619  * into future, if a transmission delay of more than 500ms is not
1620  * compensated with a corresponding fudge time2 value, because then the
 * next PPS edge is nearer than the last. (Similar to what the PPS ATOM
1622  * driver does, but we deal with full time stamps here, not just phase
1623  * shift information.) Likewise, a negative fudge time2 value must be
1624  * used if the reference time stamp correlates with the *following* PPS
1625  * pulse.
1626  *
1627  * Note that the receive time fudge value only needs to move the receive
1628  * stamp near a PPS edge but that close proximity is not required;
1629  * +/-100ms precision should be enough. But since the fudge value will
1630  * probably also be used to compensate the transmission delay when no
1631  * PPS edge can be related to the time stamp, it's best to get it as
1632  * close as possible.
1633  *
1634  * It should also be noted that the typical use case is matching to the
 * preceding edge, as most units relate their sentences to the current
1636  * second.
1637  *
1638  * The function returns FALSE if there is no correlation possible, TRUE
1639  * otherwise.  Reason for failures are:
1640  *
1641  *  - no PPS/ATOM unit given
1642  *  - PPS stamp is stale (that is, the difference between the PPS stamp
1643  *    and the corrected time stamp would exceed two seconds)
 *  - The phase difference is too close to 0.5, and the decision whether
1645  *    to move up or down is too sensitive to noise.
1646  *
1647  * On output, the receive time stamp is updated with the 'fixed' receive
1648  * time.
1649  * -------------------------------------------------------------------
1650  */
1651 
int
refclock_ppsaugment(
	const struct refclock_atom * ap	    ,	/* for PPS io	  */
	l_fp 			   * rcvtime ,	/* in/out: stamp to fix */
	double			     rcvfudge,	/* i/o read fudge */
	double			     ppsfudge	/* pps fudge	  */
	)
{
	l_fp		delta[1];

#ifdef HAVE_PPSAPI

	pps_info_t	pps_info;
	struct timespec timeout;
	l_fp		stamp[1];
	uint32_t	phase;

	/* Noise lock gap bounds, expressed as l_fp fractions:
	 * 1932735284 / 2**32 ~= 0.45 and 2362232013 / 2**32 ~= 0.55,
	 * so phases strictly inside (0.45, 0.55) are rejected below.
	 */
	static const uint32_t s_plim_hi = UINT32_C(1932735284);
	static const uint32_t s_plim_lo = UINT32_C(2362232013);

	/* fixup receive time in case we have to bail out early */
	DTOLFP(rcvfudge, delta);
	L_SUB(rcvtime, delta);

	if (NULL == ap)
		return FALSE;

	ZERO(timeout);
	ZERO(pps_info);

	/* fetch PPS stamp from ATOM block */
	if (time_pps_fetch(ap->handle, PPS_TSFMT_TSPEC,
			   &pps_info, &timeout) < 0)
		return FALSE; /* can't get time stamps */

	/* get last active PPS edge before receive */
	if (ap->pps_params.mode & PPS_CAPTUREASSERT)
		timeout = pps_info.assert_timestamp;
	else if (ap->pps_params.mode & PPS_CAPTURECLEAR)
		timeout = pps_info.clear_timestamp;
	else
		return FALSE; /* WHICH edge, please?!? */

	/* convert PPS stamp to l_fp and apply fudge */
	*stamp = tspec_stamp_to_lfp(timeout);
	DTOLFP(ppsfudge, delta);
	L_SUB(stamp, delta);

	/* Get difference between PPS stamp (--> yield) and receive time
	 * (--> base)
	 */
	*delta = *stamp;
	L_SUB(delta, rcvtime);

	/* check if either the PPS or the STAMP is stale in relation
	 * to each other. Bail if it is so...
	 * (l_ui holds the full seconds of the difference; in two's
	 * complement only -2, -1, 0 and +1 seconds are accepted.)
	 */
	phase = delta->l_ui;
	if (phase >= 2 && phase < (uint32_t)-2)
		return FALSE; /* PPS is stale, don't use it */

	/* If the phase is too close to 0.5, the decision whether to
	 * move up or down is becoming noise sensitive. That is, we
	 * might amplify usec noise between samples into seconds with a
	 * simple threshold. This can be solved by a Schmitt Trigger
	 * characteristic, but that would also require additional state
	 * where we could remember previous decisions.  Easier to play
	 * dead duck and wait for the conditions to become clear.
	 */
	phase = delta->l_uf;
	if (phase > s_plim_hi && phase < s_plim_lo)
		return FALSE; /* we're in the noise lock gap */

	/* sign-extend fraction into seconds: phase's top bit decides
	 * whether the fractional correction moves back or forward */
	delta->l_ui = UINT32_C(0) - ((phase >> 31) & 1);
	/* add it up now */
	L_ADD(rcvtime, delta);
	return TRUE;

#   else /* have no PPS support at all */

	/* just fixup receive time and fail */
	UNUSED_ARG(ap);
	UNUSED_ARG(ppsfudge);

	DTOLFP(rcvfudge, delta);
	L_SUB(rcvtime, delta);
	return FALSE;

#   endif
}
1743 
1744 /*
1745  * -------------------------------------------------------------------
1746  * check if it makes sense to schedule an 'early' poll to get the clock
1747  * up fast after start or longer signal dropout.
1748  */
1749 static void
1750 refclock_checkburst(
1751 	struct peer *         peer,
1752 	struct refclockproc * pp
1753 	)
1754 {
1755 	uint32_t	limit;	/* when we should poll */
1756 	u_int		needs;	/* needed number of samples */
1757 
1758 	/* Paranoia: stop here if peer and clockproc don't match up.
1759 	 * And when a poll is actually pending, we don't have to do
1760 	 * anything, either. Likewise if the reach mask is full, of
1761 	 * course, and if the filter has stabilized.
1762 	 */
1763 	if (pp->inpoll || (peer->procptr != pp) ||
1764 	    ((peer->reach == 0xFF) && (peer->disp <= MAXDISTANCE)))
1765 		return;
1766 
1767 	/* If the next poll is soon enough, bail out, too: */
1768 	limit = current_time + 1;
1769 	if (peer->nextdate <= limit)
1770 		return;
1771 
1772 	/* Derive the number of samples needed from the popcount of the
1773 	 * reach mask.  With less samples available, we break away.
1774 	 */
1775 	needs  = peer->reach;
1776 	needs -= (needs >> 1) & 0x55;
1777 	needs  = (needs & 0x33) + ((needs >> 2) & 0x33);
1778 	needs  = (needs + (needs >> 4)) & 0x0F;
1779 	if (needs > 6)
1780 		needs = 6;
1781 	else if (needs < 3)
1782 		needs = 3;
1783 	if (clk_cnt_sample(pp) < needs)
1784 		return;
1785 
1786 	/* Get serious. Reduce the poll to minimum and schedule early.
1787 	 * (Changing the peer poll is probably in vain, as it will be
1788 	 * re-adjusted, but maybe some time the hint will work...)
1789 	 */
1790 	peer->hpoll = peer->minpoll;
1791 	peer->nextdate = limit;
1792 }
1793 
1794 /*
1795  * -------------------------------------------------------------------
1796  * Save the last timecode string, making sure it's properly truncated
1797  * if necessary and NUL terminated in any case.
1798  */
1799 void
1800 refclock_save_lcode(
1801 	struct refclockproc *	pp,
1802 	char const *		tc,
1803 	size_t			len
1804 	)
1805 {
1806 	if (len == (size_t)-1)
1807 		len = strnlen(tc,  sizeof(pp->a_lastcode) - 1);
1808 	else if (len >= sizeof(pp->a_lastcode))
1809 		len = sizeof(pp->a_lastcode) - 1;
1810 
1811 	pp->lencode = (u_short)len;
1812 	memcpy(pp->a_lastcode, tc, len);
1813 	pp->a_lastcode[len] = '\0';
1814 }
1815 
1816 /* format data into a_lastcode */
1817 void
1818 refclock_vformat_lcode(
1819 	struct refclockproc *	pp,
1820 	char const *		fmt,
1821 	va_list			va
1822 	)
1823 {
1824 	long len;
1825 
1826 	len = vsnprintf(pp->a_lastcode, sizeof(pp->a_lastcode), fmt, va);
1827 	if (len <= 0) {
1828 		len = 0;
1829 	} else if (len >= sizeof(pp->a_lastcode)) {
1830 		len = sizeof(pp->a_lastcode) - 1;
1831 	}
1832 
1833 	pp->lencode = (u_short)len;
1834 	pp->a_lastcode[len] = '\0';
1835 	/* !note! the NUL byte is needed in case vsnprintf() really fails */
1836 }
1837 
void
refclock_format_lcode(
	struct refclockproc *	pp,
	char const *		fmt,
	...
	)
{
	va_list ap;

	/* printf-style front end; all work happens in the va_list form */
	va_start(ap, fmt);
	refclock_vformat_lcode(pp, fmt, ap);
	va_end(ap);
}
1851 
1852 #endif	/* REFCLOCK */
1853