xref: /linux/arch/s390/kernel/time.c (revision 0f69403d2535ffc7200a8414cf3ca66a49b0d741)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    Time of day based timer functions.
4  *
5  *  S390 version
6  *    Copyright IBM Corp. 1999, 2008
7  *    Author(s): Hartmut Penner (hp@de.ibm.com),
8  *               Martin Schwidefsky (schwidefsky@de.ibm.com),
9  *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
10  *
11  *  Derived from "arch/i386/kernel/time.c"
12  *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
13  */
14 
15 #define KMSG_COMPONENT "time"
16 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17 
18 #include <linux/kernel_stat.h>
19 #include <linux/errno.h>
20 #include <linux/export.h>
21 #include <linux/sched.h>
22 #include <linux/sched/clock.h>
23 #include <linux/kernel.h>
24 #include <linux/param.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/interrupt.h>
28 #include <linux/cpu.h>
29 #include <linux/stop_machine.h>
30 #include <linux/time.h>
31 #include <linux/device.h>
32 #include <linux/delay.h>
33 #include <linux/init.h>
34 #include <linux/smp.h>
35 #include <linux/types.h>
36 #include <linux/profile.h>
37 #include <linux/timex.h>
38 #include <linux/notifier.h>
39 #include <linux/timekeeper_internal.h>
40 #include <linux/clockchips.h>
41 #include <linux/gfp.h>
42 #include <linux/kprobes.h>
43 #include <linux/uaccess.h>
44 #include <asm/facility.h>
45 #include <asm/delay.h>
46 #include <asm/div64.h>
47 #include <asm/vdso.h>
48 #include <asm/irq.h>
49 #include <asm/irq_regs.h>
50 #include <asm/vtimer.h>
51 #include <asm/stp.h>
52 #include <asm/cio.h>
53 #include "entry.h"
54 
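/*
 * 128-bit extended TOD clock base: byte 0 holds the epoch index, bytes
 * 1-8 overlay the classic 64-bit TOD clock. It is initialized from the
 * TOD clock early during boot and is subtracted from the running clock
 * by get_tod_clock_monotonic() to form the monotonic scheduler clock.
 * The all-ones initializer merely forces the array into the data
 * section so that it is usable before the bss section is cleared.
 */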
55 unsigned char tod_clock_base[16] __aligned(8) = {
56 	/* Force to data section. */
57 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
58 	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
59 };
60 EXPORT_SYMBOL_GPL(tod_clock_base);
61 
62 u64 clock_comparator_max = -1ULL;
63 EXPORT_SYMBOL_GPL(clock_comparator_max);
64 
65 static DEFINE_PER_CPU(struct clock_event_device, comparators);
66 
67 ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
68 EXPORT_SYMBOL(s390_epoch_delta_notifier);
69 
70 unsigned char ptff_function_mask[16];
71 
72 static unsigned long long lpar_offset;
73 static unsigned long long initial_leap_seconds;
74 static unsigned long long tod_steering_end;
75 static long long tod_steering_delta;
76 
77 /*
78  * Get time offsets with PTFF
79  */
80 void __init time_early_init(void)
81 {
82 	struct ptff_qto qto;
83 	struct ptff_qui qui;
84 
85 	/* Initialize TOD steering parameters */
86 	tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
87 	vdso_data->ts_end = tod_steering_end;
88 
89 	if (!test_facility(28))
90 		return;
91 
92 	ptff(&ptff_function_mask, sizeof(ptff_function_mask), PTFF_QAF);
93 
94 	/* get LPAR offset */
95 	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
96 		lpar_offset = qto.tod_epoch_difference;
97 
98 	/* get initial leap seconds */
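	/*
	 * qui.old_leap is a number of seconds; the TOD clock advances
	 * 4096 units per microsecond, i.e. 4,096,000,000 units per second.
	 */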
99 	if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
100 		initial_leap_seconds = (unsigned long long)
101 			((long) qui.old_leap * 4096000000L);
102 }
103 
104 /*
105  * Scheduler clock - returns current time in nanosec units.
106  */
107 unsigned long long notrace sched_clock(void)
108 {
109 	return tod_to_ns(get_tod_clock_monotonic());
110 }
111 NOKPROBE_SYMBOL(sched_clock);
112 
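/*
 * Convert a 128-bit extended TOD clock value (epoch index in byte 0,
 * 64-bit TOD clock in bytes 1-8, sub-clock bits below that) to a
 * timespec64: the first quadword shifted right by 4 yields microseconds
 * including the epoch extension, the quadword starting at byte 7 shifted
 * left by 4 yields the sub-microsecond fraction.
 */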
113 static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
114 {
115 	unsigned long long high, low, rem, sec, nsec;
116 
117 	/* Split extended TOD clock into micro-seconds and sub-micro-seconds */
118 	high = (*(unsigned long long *) clk) >> 4;
119 	low = (*(unsigned long long *)&clk[7]) << 4;
120 	/* Calculate seconds and nano-seconds */
121 	sec = high;
122 	rem = do_div(sec, 1000000);
123 	nsec = (((low >> 32) + (rem << 32)) * 1000) >> 32;
124 
125 	xt->tv_sec = sec;
126 	xt->tv_nsec = nsec;
127 }
128 
129 void clock_comparator_work(void)
130 {
131 	struct clock_event_device *cd;
132 
133 	S390_lowcore.clock_comparator = clock_comparator_max;
134 	cd = this_cpu_ptr(&comparators);
135 	cd->event_handler(cd);
136 }
137 
138 static int s390_next_event(unsigned long delta,
139 			   struct clock_event_device *evt)
140 {
141 	S390_lowcore.clock_comparator = get_tod_clock() + delta;
142 	set_clock_comparator(S390_lowcore.clock_comparator);
143 	return 0;
144 }
145 
146 /*
147  * Set up lowcore and control register of the current cpu to
148  * enable TOD clock and clock comparator interrupts.
149  */
150 void init_cpu_timer(void)
151 {
152 	struct clock_event_device *cd;
153 	int cpu;
154 
155 	S390_lowcore.clock_comparator = clock_comparator_max;
156 	set_clock_comparator(S390_lowcore.clock_comparator);
157 
158 	cpu = smp_processor_id();
159 	cd = &per_cpu(comparators, cpu);
160 	cd->name		= "comparator";
161 	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
162 	cd->mult		= 16777;
163 	cd->shift		= 12;
164 	cd->min_delta_ns	= 1;
165 	cd->min_delta_ticks	= 1;
166 	cd->max_delta_ns	= LONG_MAX;
167 	cd->max_delta_ticks	= ULONG_MAX;
168 	cd->rating		= 400;
169 	cd->cpumask		= cpumask_of(cpu);
170 	cd->set_next_event	= s390_next_event;
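	/*
	 * The mult/shift pair above converts nanoseconds to TOD clock
	 * units for s390_next_event(): 1 ns = 4.096 TOD units, and
	 * 16777 / 2^12 = 4.0959...
	 */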
171 
172 	clockevents_register_device(cd);
173 
174 	/* Enable clock comparator timer interrupt. */
175 	__ctl_set_bit(0, 11);
176 
177 	/* Always allow the timing alert external interrupt. */
178 	__ctl_set_bit(0, 4);
179 }
180 
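/*
 * External interrupt handler for clock comparator interrupts. If no
 * event is pending (the lowcore comparator is still at the maximum
 * value) the hardware comparator is simply re-armed to silence the
 * interrupt; expired events are delivered via clock_comparator_work().
 */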
181 static void clock_comparator_interrupt(struct ext_code ext_code,
182 				       unsigned int param32,
183 				       unsigned long param64)
184 {
185 	inc_irq_stat(IRQEXT_CLK);
186 	if (S390_lowcore.clock_comparator == clock_comparator_max)
187 		set_clock_comparator(S390_lowcore.clock_comparator);
188 }
189 
190 static void stp_timing_alert(struct stp_irq_parm *);
191 
192 static void timing_alert_interrupt(struct ext_code ext_code,
193 				   unsigned int param32, unsigned long param64)
194 {
195 	inc_irq_stat(IRQEXT_TLA);
196 	if (param32 & 0x00038000)
197 		stp_timing_alert((struct stp_irq_parm *) &param32);
198 }
199 
200 static void stp_reset(void);
201 
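/*
 * The persistent clock is derived from the extended TOD clock by
 * subtracting the TOD value of the Unix epoch (1970-01-01) plus the
 * initial leap seconds; a borrow is propagated into the epoch byte.
 */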
202 void read_persistent_clock64(struct timespec64 *ts)
203 {
204 	unsigned char clk[STORE_CLOCK_EXT_SIZE];
205 	__u64 delta;
206 
207 	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
208 	get_tod_clock_ext(clk);
209 	*(__u64 *) &clk[1] -= delta;
210 	if (*(__u64 *) &clk[1] > delta)
211 		clk[0]--;
212 	ext_to_timespec64(clk, ts);
213 }
214 
215 void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
216 						 struct timespec64 *boot_offset)
217 {
218 	unsigned char clk[STORE_CLOCK_EXT_SIZE];
219 	struct timespec64 boot_time;
220 	__u64 delta;
221 
222 	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
223 	memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
224 	*(__u64 *)&clk[1] -= delta;
225 	if (*(__u64 *)&clk[1] > delta)
226 		clk[0]--;
227 	ext_to_timespec64(clk, &boot_time);
228 
229 	read_persistent_clock64(wall_time);
230 	*boot_offset = timespec64_sub(*wall_time, boot_time);
231 }
232 
233 static u64 read_tod_clock(struct clocksource *cs)
234 {
235 	unsigned long long now, adj;
236 
237 	preempt_disable(); /* protect from changes to steering parameters */
238 	now = get_tod_clock();
239 	adj = tod_steering_end - now;
240 	if (unlikely((s64) adj >= 0))
241 		/*
242 		 * manually steer by 1 cycle every 2^15 cycles. This
243 		 * corresponds to shifting the tod delta by 15. 1s is
244 		 * therefore steered in ~9h. The adjust will decrease
245 		 * over time, until it finally reaches 0.
246 		 */
247 		now += (tod_steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
248 	preempt_enable();
249 	return now;
250 }
251 
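/*
 * TOD clocksource: 4096 TOD clock units correspond to 1 microsecond,
 * so with mult = 1000 and shift = 12 the generic timekeeping code
 * converts clock deltas to nanoseconds as (delta * 1000) >> 12.
 */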
252 static struct clocksource clocksource_tod = {
253 	.name		= "tod",
254 	.rating		= 400,
255 	.read		= read_tod_clock,
256 	.mask		= -1ULL,
257 	.mult		= 1000,
258 	.shift		= 12,
259 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
260 };
261 
262 struct clocksource * __init clocksource_default_clock(void)
263 {
264 	return &clocksource_tod;
265 }
266 
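/*
 * Copy the current timekeeper state into the vdso data page.
 * tb_update_count acts as a sequence counter: it is incremented before
 * and after the update, so an odd value tells the vdso gettimeofday
 * code that an update is in progress and the read has to be retried.
 * The high resolution *_nsec values are stored shifted left by
 * tk->tkr_mono.shift.
 */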
267 void update_vsyscall(struct timekeeper *tk)
268 {
269 	u64 nsecps;
270 
271 	if (tk->tkr_mono.clock != &clocksource_tod)
272 		return;
273 
274 	/* Make userspace gettimeofday spin until we're done. */
275 	++vdso_data->tb_update_count;
276 	smp_wmb();
277 	vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
278 	vdso_data->xtime_clock_sec = tk->xtime_sec;
279 	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
280 	vdso_data->wtom_clock_sec =
281 		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
282 	vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
283 		((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
284 	nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
285 	while (vdso_data->wtom_clock_nsec >= nsecps) {
286 		vdso_data->wtom_clock_nsec -= nsecps;
287 		vdso_data->wtom_clock_sec++;
288 	}
289 
290 	vdso_data->xtime_coarse_sec = tk->xtime_sec;
291 	vdso_data->xtime_coarse_nsec =
292 		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
293 	vdso_data->wtom_coarse_sec =
294 		vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
295 	vdso_data->wtom_coarse_nsec =
296 		vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
297 	while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
298 		vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
299 		vdso_data->wtom_coarse_sec++;
300 	}
301 
302 	vdso_data->tk_mult = tk->tkr_mono.mult;
303 	vdso_data->tk_shift = tk->tkr_mono.shift;
304 	vdso_data->hrtimer_res = hrtimer_resolution;
305 	smp_wmb();
306 	++vdso_data->tb_update_count;
307 }
308 
309 extern struct timezone sys_tz;
310 
311 void update_vsyscall_tz(void)
312 {
313 	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
314 	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
315 }
316 
317 /*
318  * Initialize the TOD clock and the CPU timer of
319  * the boot cpu.
320  */
321 void __init time_init(void)
322 {
323 	/* Reset time synchronization interfaces. */
324 	stp_reset();
325 
326 	/* request the clock comparator external interrupt */
327 	if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
328 		panic("Couldn't request external interrupt 0x1004");
329 
330 	/* request the timing alert external interrupt */
331 	if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
332 		panic("Couldn't request external interrupt 0x1406");
333 
334 	if (__clocksource_register(&clocksource_tod) != 0)
335 		panic("Could not register TOD clock source");
336 
337 	/* Enable TOD clock interrupts on the boot cpu. */
338 	init_cpu_timer();
339 
340 	/* Enable cpu timer interrupts on the boot cpu. */
341 	vtime_init();
342 }
343 
344 static DEFINE_PER_CPU(atomic_t, clock_sync_word);
345 static DEFINE_MUTEX(clock_sync_mutex);
346 static unsigned long clock_sync_flags;
347 
348 #define CLOCK_SYNC_HAS_STP	0
349 #define CLOCK_SYNC_STP		1
350 
351 /*
352  * The get_clock function for the physical clock. It will get the current
353  * TOD clock, subtract the LPAR offset and write the result to *clock.
354  * The function returns 0 if the clock is in sync with the external time
355  * source, -EOPNOTSUPP in local clock mode (no STP interface), -EACCES if
356  * STP is not enabled, and -EAGAIN if the clock is not yet in sync.
357  */
358 int get_phys_clock(unsigned long *clock)
359 {
360 	atomic_t *sw_ptr;
361 	unsigned int sw0, sw1;
362 
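	/*
	 * Sample the per-cpu clock_sync_word before and after reading the
	 * clock. If the word did not change and the in-sync bit (2^31) is
	 * set, the reading was not disturbed by a concurrent STP event.
	 */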
363 	sw_ptr = &get_cpu_var(clock_sync_word);
364 	sw0 = atomic_read(sw_ptr);
365 	*clock = get_tod_clock() - lpar_offset;
366 	sw1 = atomic_read(sw_ptr);
367 	put_cpu_var(clock_sync_word);
368 	if (sw0 == sw1 && (sw0 & 0x80000000U))
369 		/* Success: time is in sync. */
370 		return 0;
371 	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
372 		return -EOPNOTSUPP;
373 	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
374 		return -EACCES;
375 	return -EAGAIN;
376 }
377 EXPORT_SYMBOL(get_phys_clock);
378 
379 /*
380  * Make get_phys_clock() return -EAGAIN.
381  */
382 static void disable_sync_clock(void *dummy)
383 {
384 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
385 	/*
386 	 * Clear the in-sync bit 2^31. All get_phys_clock calls will
387 	 * fail until the sync bit is turned back on. In addition,
388 	 * increase the "sequence" counter so that get_phys_clock()
389 	 * cannot race with an STP event and the subsequent recovery.
390 	 */
391 	atomic_andnot(0x80000000, sw_ptr);
392 	atomic_inc(sw_ptr);
393 }
394 
395 /*
396  * Make get_phys_clock() return 0 again.
397  * Needs to be called from a context disabled for preemption.
398  */
399 static void enable_sync_clock(void)
400 {
401 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
402 	atomic_or(0x80000000, sw_ptr);
403 }
404 
405 /*
406  * Function to check if the clock is in sync.
407  */
408 static inline int check_sync_clock(void)
409 {
410 	atomic_t *sw_ptr;
411 	int rc;
412 
413 	sw_ptr = &get_cpu_var(clock_sync_word);
414 	rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
415 	put_cpu_var(clock_sync_word);
416 	return rc;
417 }
418 
419 /*
420  * Apply clock delta to the global data structures.
421  * This is called once on the CPU that performed the clock sync.
422  */
423 static void clock_sync_global(unsigned long long delta)
424 {
425 	unsigned long now, adj;
426 	struct ptff_qto qto;
427 
428 	/* Fixup the monotonic sched clock. */
429 	*(unsigned long long *) &tod_clock_base[1] += delta;
430 	if (*(unsigned long long *) &tod_clock_base[1] < delta)
431 		/* Epoch overflow */
432 		tod_clock_base[0]++;
433 	/* Adjust TOD steering parameters. */
434 	vdso_data->tb_update_count++;
435 	now = get_tod_clock();
436 	adj = tod_steering_end - now;
437 	if (unlikely((s64) adj >= 0))
438 		/* Calculate how much of the old adjustment is left. */
439 		tod_steering_delta = (tod_steering_delta < 0) ?
440 			-(adj >> 15) : (adj >> 15);
441 	tod_steering_delta += delta;
442 	if ((abs(tod_steering_delta) >> 48) != 0)
443 		panic("TOD clock sync offset %lli is too large to drift\n",
444 		      tod_steering_delta);
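	/*
	 * The delta is steered out at a rate of 1:2^15, so the steering
	 * phase lasts |tod_steering_delta| << 15 clock units (roughly 9
	 * hours per steered second).
	 */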
445 	tod_steering_end = now + (abs(tod_steering_delta) << 15);
446 	vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
447 	vdso_data->ts_end = tod_steering_end;
448 	vdso_data->tb_update_count++;
449 	/* Update LPAR offset. */
450 	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
451 		lpar_offset = qto.tod_epoch_difference;
452 	/* Call the TOD clock change notifier. */
453 	atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0, &delta);
454 }
455 
456 /*
457  * Apply clock delta to the per-CPU data structures of this CPU.
458  * This is called for each online CPU after the call to clock_sync_global.
459  */
460 static void clock_sync_local(unsigned long long delta)
461 {
462 	/* Add the delta to the clock comparator. */
463 	if (S390_lowcore.clock_comparator != clock_comparator_max) {
464 		S390_lowcore.clock_comparator += delta;
465 		set_clock_comparator(S390_lowcore.clock_comparator);
466 	}
467 	/* Adjust the last_update_clock time-stamp. */
468 	S390_lowcore.last_update_clock += delta;
469 }
470 
471 /* Single threaded workqueue used for stp sync events */
472 static struct workqueue_struct *time_sync_wq;
473 
474 static void __init time_init_wq(void)
475 {
476 	if (time_sync_wq)
477 		return;
478 	time_sync_wq = create_singlethread_workqueue("timesync");
479 }
480 
481 struct clock_sync_data {
482 	atomic_t cpus;
483 	int in_sync;
484 	unsigned long long clock_delta;
485 };
486 
487 /*
488  * Server Time Protocol (STP) code.
489  */
490 static bool stp_online;
491 static struct stp_sstpi stp_info;
492 static void *stp_page;
493 
494 static void stp_work_fn(struct work_struct *work);
495 static DEFINE_MUTEX(stp_work_mutex);
496 static DECLARE_WORK(stp_work, stp_work_fn);
497 static struct timer_list stp_timer;
498 
499 static int __init early_parse_stp(char *p)
500 {
501 	return kstrtobool(p, &stp_online);
502 }
503 early_param("stp", early_parse_stp);
504 
505 /*
506  * Reset STP attachment.
507  */
508 static void __init stp_reset(void)
509 {
510 	int rc;
511 
512 	stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
513 	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
514 	if (rc == 0)
515 		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
516 	else if (stp_online) {
517 		pr_warn("The real or virtual hardware system does not provide an STP interface\n");
518 		free_page((unsigned long) stp_page);
519 		stp_page = NULL;
520 		stp_online = false;
521 	}
522 }
523 
524 static void stp_timeout(struct timer_list *unused)
525 {
526 	queue_work(time_sync_wq, &stp_work);
527 }
528 
529 static int __init stp_init(void)
530 {
531 	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
532 		return 0;
533 	timer_setup(&stp_timer, stp_timeout, 0);
534 	time_init_wq();
535 	if (!stp_online)
536 		return 0;
537 	queue_work(time_sync_wq, &stp_work);
538 	return 0;
539 }
540 
541 arch_initcall(stp_init);
542 
543 /*
544  * STP timing alert. There are three causes:
545  * 1) timing status change
546  * 2) link availability change
547  * 3) time control parameter change
548  * In all three cases we are only interested in the clock source state.
549  * If an STP clock source is now available, use it.
550  */
551 static void stp_timing_alert(struct stp_irq_parm *intparm)
552 {
553 	if (intparm->tsc || intparm->lac || intparm->tcpc)
554 		queue_work(time_sync_wq, &stp_work);
555 }
556 
557 /*
558  * STP sync check machine check. This is called when the timing state
559  * changes from the synchronized state to the unsynchronized state.
560  * After an STP sync check the clock is not in sync. The machine check
561  * is broadcast to all cpus at the same time.
562  */
563 int stp_sync_check(void)
564 {
565 	disable_sync_clock(NULL);
566 	return 1;
567 }
568 
569 /*
570  * STP island condition machine check. This is called when an attached
571  * server attempts to communicate over an STP link and the servers
572  * have matching CTN ids and each has a valid stratum-1 configuration,
573  * but the two stratum-1 configurations do not match.
574  */
575 int stp_island_check(void)
576 {
577 	disable_sync_clock(NULL);
578 	return 1;
579 }
580 
581 void stp_queue_work(void)
582 {
583 	queue_work(time_sync_wq, &stp_work);
584 }
585 
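/*
 * Synchronize the TOD clock to the STP facility. This runs on all online
 * CPUs via stop_machine(): the first CPU to set "first" acts as the
 * master, waits for the other CPUs to arrive, performs the STP sync
 * operation and the global clock adjustment, and publishes the result in
 * sync->in_sync; the other CPUs spin until in_sync is set. All CPUs then
 * apply the clock delta to their per-CPU data.
 */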
586 static int stp_sync_clock(void *data)
587 {
588 	struct clock_sync_data *sync = data;
589 	unsigned long long clock_delta;
590 	static int first;
591 	int rc;
592 
593 	enable_sync_clock();
594 	if (xchg(&first, 1) == 0) {
595 		/* Wait until all other cpus entered the sync function. */
596 		while (atomic_read(&sync->cpus) != 0)
597 			cpu_relax();
598 		rc = 0;
599 		if (stp_info.todoff[0] || stp_info.todoff[1] ||
600 		    stp_info.todoff[2] || stp_info.todoff[3] ||
601 		    stp_info.tmd != 2) {
602 			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
603 					&clock_delta);
604 			if (rc == 0) {
605 				sync->clock_delta = clock_delta;
606 				clock_sync_global(clock_delta);
607 				rc = chsc_sstpi(stp_page, &stp_info,
608 						sizeof(struct stp_sstpi));
609 				if (rc == 0 && stp_info.tmd != 2)
610 					rc = -EAGAIN;
611 			}
612 		}
613 		sync->in_sync = rc ? -EAGAIN : 1;
614 		xchg(&first, 0);
615 	} else {
616 		/* Slave */
617 		atomic_dec(&sync->cpus);
618 		/* Wait for in_sync to be set. */
619 		while (READ_ONCE(sync->in_sync) == 0)
620 			__udelay(1);
621 	}
622 	if (sync->in_sync != 1)
623 		/* Didn't work. Clear per-cpu in sync bit again. */
624 		disable_sync_clock(NULL);
625 	/* Apply clock delta to per-CPU fields of this CPU. */
626 	clock_sync_local(sync->clock_delta);
627 
628 	return 0;
629 }
630 
631 /*
632  * STP work. Check for the STP state and take over the clock
633  * synchronization if the STP clock source is usable.
634  */
635 static void stp_work_fn(struct work_struct *work)
636 {
637 	struct clock_sync_data stp_sync;
638 	int rc;
639 
640 	/* prevent multiple execution. */
641 	mutex_lock(&stp_work_mutex);
642 
643 	if (!stp_online) {
644 		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
645 		del_timer_sync(&stp_timer);
646 		goto out_unlock;
647 	}
648 
649 	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0, NULL);
650 	if (rc)
651 		goto out_unlock;
652 
653 	rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
654 	if (rc || stp_info.c == 0)
655 		goto out_unlock;
656 
657 	/* Skip synchronization if the clock is already in sync. */
658 	if (check_sync_clock())
659 		goto out_unlock;
660 
661 	memset(&stp_sync, 0, sizeof(stp_sync));
662 	cpus_read_lock();
663 	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
664 	stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
665 	cpus_read_unlock();
666 
667 	if (!check_sync_clock())
668 		/*
669 		 * There is a usable clock but the synchronization failed.
670 		 * Retry after a second.
671 		 */
672 		mod_timer(&stp_timer, jiffies + HZ);
673 
674 out_unlock:
675 	mutex_unlock(&stp_work_mutex);
676 }
677 
678 /*
679  * STP subsys sysfs interface functions
680  */
681 static struct bus_type stp_subsys = {
682 	.name		= "stp",
683 	.dev_name	= "stp",
684 };
685 
686 static ssize_t stp_ctn_id_show(struct device *dev,
687 				struct device_attribute *attr,
688 				char *buf)
689 {
690 	if (!stp_online)
691 		return -ENODATA;
692 	return sprintf(buf, "%016llx\n",
693 		       *(unsigned long long *) stp_info.ctnid);
694 }
695 
696 static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
697 
698 static ssize_t stp_ctn_type_show(struct device *dev,
699 				struct device_attribute *attr,
700 				char *buf)
701 {
702 	if (!stp_online)
703 		return -ENODATA;
704 	return sprintf(buf, "%i\n", stp_info.ctn);
705 }
706 
707 static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
708 
709 static ssize_t stp_dst_offset_show(struct device *dev,
710 				   struct device_attribute *attr,
711 				   char *buf)
712 {
713 	if (!stp_online || !(stp_info.vbits & 0x2000))
714 		return -ENODATA;
715 	return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
716 }
717 
718 static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
719 
720 static ssize_t stp_leap_seconds_show(struct device *dev,
721 					struct device_attribute *attr,
722 					char *buf)
723 {
724 	if (!stp_online || !(stp_info.vbits & 0x8000))
725 		return -ENODATA;
726 	return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
727 }
728 
729 static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
730 
731 static ssize_t stp_stratum_show(struct device *dev,
732 				struct device_attribute *attr,
733 				char *buf)
734 {
735 	if (!stp_online)
736 		return -ENODATA;
737 	return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
738 }
739 
740 static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
741 
742 static ssize_t stp_time_offset_show(struct device *dev,
743 				struct device_attribute *attr,
744 				char *buf)
745 {
746 	if (!stp_online || !(stp_info.vbits & 0x0800))
747 		return -ENODATA;
748 	return sprintf(buf, "%i\n", (int) stp_info.tto);
749 }
750 
751 static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
752 
753 static ssize_t stp_time_zone_offset_show(struct device *dev,
754 				struct device_attribute *attr,
755 				char *buf)
756 {
757 	if (!stp_online || !(stp_info.vbits & 0x4000))
758 		return -ENODATA;
759 	return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
760 }
761 
762 static DEVICE_ATTR(time_zone_offset, 0400,
763 			 stp_time_zone_offset_show, NULL);
764 
765 static ssize_t stp_timing_mode_show(struct device *dev,
766 				struct device_attribute *attr,
767 				char *buf)
768 {
769 	if (!stp_online)
770 		return -ENODATA;
771 	return sprintf(buf, "%i\n", stp_info.tmd);
772 }
773 
774 static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
775 
776 static ssize_t stp_timing_state_show(struct device *dev,
777 				struct device_attribute *attr,
778 				char *buf)
779 {
780 	if (!stp_online)
781 		return -ENODATA;
782 	return sprintf(buf, "%i\n", stp_info.tst);
783 }
784 
785 static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
786 
787 static ssize_t stp_online_show(struct device *dev,
788 				struct device_attribute *attr,
789 				char *buf)
790 {
791 	return sprintf(buf, "%i\n", stp_online);
792 }
793 
794 static ssize_t stp_online_store(struct device *dev,
795 				struct device_attribute *attr,
796 				const char *buf, size_t count)
797 {
798 	unsigned int value;
799 
800 	value = simple_strtoul(buf, NULL, 0);
801 	if (value != 0 && value != 1)
802 		return -EINVAL;
803 	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
804 		return -EOPNOTSUPP;
805 	mutex_lock(&clock_sync_mutex);
806 	stp_online = value;
807 	if (stp_online)
808 		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
809 	else
810 		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
811 	queue_work(time_sync_wq, &stp_work);
812 	mutex_unlock(&clock_sync_mutex);
813 	return count;
814 }
815 
816 /*
817  * Can't use DEVICE_ATTR because the attribute should be named
818  * stp/online but dev_attr_online already exists in this file ..
819  */
820 static struct device_attribute dev_attr_stp_online = {
821 	.attr = { .name = "online", .mode = 0600 },
822 	.show	= stp_online_show,
823 	.store	= stp_online_store,
824 };
825 
826 static struct device_attribute *stp_attributes[] = {
827 	&dev_attr_ctn_id,
828 	&dev_attr_ctn_type,
829 	&dev_attr_dst_offset,
830 	&dev_attr_leap_seconds,
831 	&dev_attr_stp_online,
832 	&dev_attr_stratum,
833 	&dev_attr_time_offset,
834 	&dev_attr_time_zone_offset,
835 	&dev_attr_timing_mode,
836 	&dev_attr_timing_state,
837 	NULL
838 };
839 
840 static int __init stp_init_sysfs(void)
841 {
842 	struct device_attribute **attr;
843 	int rc;
844 
845 	rc = subsys_system_register(&stp_subsys, NULL);
846 	if (rc)
847 		goto out;
848 	for (attr = stp_attributes; *attr; attr++) {
849 		rc = device_create_file(stp_subsys.dev_root, *attr);
850 		if (rc)
851 			goto out_unreg;
852 	}
853 	return 0;
854 out_unreg:
855 	for (; attr >= stp_attributes; attr--)
856 		device_remove_file(stp_subsys.dev_root, *attr);
857 	bus_unregister(&stp_subsys);
858 out:
859 	return rc;
860 }
861 
862 device_initcall(stp_init_sysfs);
863