xref: /linux/arch/x86/kernel/hpet.c (revision 6feb348783767e3f38d7612e6551ee8b580ac4e9)
1 #include <linux/clocksource.h>
2 #include <linux/clockchips.h>
3 #include <linux/interrupt.h>
4 #include <linux/sysdev.h>
5 #include <linux/delay.h>
6 #include <linux/errno.h>
7 #include <linux/hpet.h>
8 #include <linux/init.h>
9 #include <linux/cpu.h>
10 #include <linux/pm.h>
11 #include <linux/io.h>
12 
13 #include <asm/fixmap.h>
14 #include <asm/i8253.h>
15 #include <asm/hpet.h>
16 
17 #define HPET_MASK			CLOCKSOURCE_MASK(32)
18 #define HPET_SHIFT			22
19 
20 /* FSEC = 10^-15
21    NSEC = 10^-9 */
22 #define FSEC_PER_NSEC			1000000L
23 
24 #define HPET_DEV_USED_BIT		2
25 #define HPET_DEV_USED			(1 << HPET_DEV_USED_BIT)
26 #define HPET_DEV_VALID			0x8
27 #define HPET_DEV_FSB_CAP		0x1000
28 #define HPET_DEV_PERI_CAP		0x2000
29 
30 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
31 
32 /*
33  * HPET address is set in acpi/boot.c, when an ACPI entry exists
34  */
35 unsigned long				hpet_address;
36 unsigned long				hpet_num_timers;
37 static void __iomem			*hpet_virt_address;
38 
/* Per-comparator state for the MSI (FSB delivery) based per-cpu timers */
struct hpet_dev {
	struct clock_event_device	evt;
	unsigned int			num;	/* HPET comparator index */
	int				cpu;	/* cpu this timer is bound to */
	unsigned int			irq;
	unsigned int			flags;	/* HPET_DEV_* bits */
	char				name[10];
};
47 
48 unsigned long hpet_readl(unsigned long a)
49 {
50 	return readl(hpet_virt_address + a);
51 }
52 
53 static inline void hpet_writel(unsigned long d, unsigned long a)
54 {
55 	writel(d, hpet_virt_address + a);
56 }
57 
58 #ifdef CONFIG_X86_64
59 #include <asm/pgtable.h>
60 #endif
61 
/*
 * Map the HPET MMIO register block (physical base discovered via ACPI
 * and stored in hpet_address) for use by hpet_readl()/hpet_writel().
 */
static inline void hpet_set_mapping(void)
{
	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
#ifdef CONFIG_X86_64
	/* Also expose the registers via the fixmap for the vsyscall reader */
	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
#endif
}
69 
70 static inline void hpet_clear_mapping(void)
71 {
72 	iounmap(hpet_virt_address);
73 	hpet_virt_address = NULL;
74 }
75 
76 /*
77  * HPET command line enable / disable
78  */
79 static int boot_hpet_disable;
80 int hpet_force_user;
81 
82 static int __init hpet_setup(char *str)
83 {
84 	if (str) {
85 		if (!strncmp("disable", str, 7))
86 			boot_hpet_disable = 1;
87 		if (!strncmp("force", str, 5))
88 			hpet_force_user = 1;
89 	}
90 	return 1;
91 }
92 __setup("hpet=", hpet_setup);
93 
/* "nohpet" boot option: disable HPET usage completely */
static int __init disable_hpet(char *str)
{
	boot_hpet_disable = 1;
	return 1;
}
99 __setup("nohpet", disable_hpet);
100 
101 static inline int is_hpet_capable(void)
102 {
103 	return !boot_hpet_disable && hpet_address;
104 }
105 
106 /*
107  * HPET timer interrupt enable / disable
108  */
109 static int hpet_legacy_int_enabled;
110 
111 /**
112  * is_hpet_enabled - check whether the hpet timer interrupt is enabled
113  */
114 int is_hpet_enabled(void)
115 {
116 	return is_hpet_capable() && hpet_legacy_int_enabled;
117 }
118 EXPORT_SYMBOL_GPL(is_hpet_enabled);
119 
120 /*
121  * When the hpet driver (/dev/hpet) is enabled, we need to reserve
122  * timer 0 and timer 1 in case of RTC emulation.
123  */
124 #ifdef CONFIG_HPET
125 
126 static void hpet_reserve_msi_timers(struct hpet_data *hd);
127 
/*
 * Hand all comparators not used by the kernel (timer 0, plus timer 1
 * when RTC emulation is compiled in) over to the /dev/hpet driver.
 * @id: contents of the HPET_ID register, used for the timer count.
 */
static void hpet_reserve_platform_timers(unsigned long id)
{
	struct hpet __iomem *hpet = hpet_virt_address;
	struct hpet_timer __iomem *timer = &hpet->hpet_timers[2];
	unsigned int nrtimers, i;
	struct hpet_data hd;

	/* HPET_ID_NUMBER holds the index of the last comparator, hence +1 */
	nrtimers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;

	memset(&hd, 0, sizeof(hd));
	hd.hd_phys_address	= hpet_address;
	hd.hd_address		= hpet;
	hd.hd_nirqs		= nrtimers;
	/* Timer 0 is the kernel tick timer - keep it away from userspace */
	hpet_reserve_timer(&hd, 0);

#ifdef CONFIG_HPET_EMULATE_RTC
	/* Timer 1 is used for RTC interrupt emulation */
	hpet_reserve_timer(&hd, 1);
#endif

	/*
	 * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
	 * is wrong for i8259!) not the output IRQ.  Many BIOS writers
	 * don't bother configuring *any* comparator interrupts.
	 */
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;

	for (i = 2; i < nrtimers; timer++, i++) {
		hd.hd_irq[i] = (readl(&timer->hpet_config) &
			Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_reserve_msi_timers(&hd);

	hpet_alloc(&hd);

}
165 #else
166 static void hpet_reserve_platform_timers(unsigned long id) { }
167 #endif
168 
169 /*
170  * Common hpet info
171  */
172 static unsigned long hpet_period;
173 
174 static void hpet_legacy_set_mode(enum clock_event_mode mode,
175 			  struct clock_event_device *evt);
176 static int hpet_legacy_next_event(unsigned long delta,
177 			   struct clock_event_device *evt);
178 
179 /*
180  * The hpet clock event device
181  */
static struct clock_event_device hpet_clockevent = {
	.name		= "hpet",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= hpet_legacy_set_mode,
	.set_next_event = hpet_legacy_next_event,
	.shift		= 32,
	.irq		= 0,
	/* Low rating: per-cpu timers (e.g. local APIC) are preferred */
	.rating		= 50,
};
191 
/*
 * Reset the main counter to zero and (re)start it. The counter is
 * halted first because it may only be written while stopped.
 */
static void hpet_start_counter(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);

	cfg &= ~HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
	/* Clear both 32 bit halves of the 64 bit main counter */
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);
	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);
}
203 
/* Let chipset quirk code re-enable a force-enabled HPET after resume */
static void hpet_resume_device(void)
{
	force_hpet_resume();
}
208 
/*
 * Clocksource resume callback: re-enable the device first, then
 * restart the main counter from zero.
 */
static void hpet_restart_counter(void)
{
	hpet_resume_device();
	hpet_start_counter();
}
214 
/*
 * Switch the HPET into legacy replacement mode: comparator 0 routes
 * to the timer interrupt (IRQ0) and comparator 1 to the RTC (IRQ8).
 */
static void hpet_enable_legacy_int(void)
{
	unsigned long cfg = hpet_readl(HPET_CFG);

	cfg |= HPET_CFG_LEGACY;
	hpet_writel(cfg, HPET_CFG);
	hpet_legacy_int_enabled = 1;
}
223 
/* Register HPET timer 0 as the global clock event (tick) device */
static void hpet_legacy_clockevent_register(void)
{
	/* Start HPET legacy interrupts */
	hpet_enable_legacy_int();

	/*
	 * The mult factor is defined as (include/linux/clockchips.h)
	 *  mult/2^shift = cyc/ns (in contrast to ns/cyc in clocksource.h)
	 * hpet_period is in units of femtoseconds (per cycle), so
	 *  mult/2^shift = cyc/ns = 10^6/hpet_period
	 *  mult = (10^6 * 2^shift)/hpet_period
	 *  mult = (FSEC_PER_NSEC << hpet_clockevent.shift)/hpet_period
	 */
	hpet_clockevent.mult = div_sc((unsigned long) FSEC_PER_NSEC,
				      hpet_period, hpet_clockevent.shift);
	/* Calculate the min / max delta */
	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
							   &hpet_clockevent);
	/* 5 usec minimum reprogramming delta. */
	hpet_clockevent.min_delta_ns = 5000;

	/*
	 * Start hpet with the boot cpu mask and make it
	 * global after the IO_APIC has been initialized.
	 */
	hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
	clockevents_register_device(&hpet_clockevent);
	global_clock_event = &hpet_clockevent;
	printk(KERN_DEBUG "hpet clockevent registered\n");
}
254 
255 static int hpet_setup_msi_irq(unsigned int irq);
256 
/*
 * Configure HPET comparator @timer for the requested clockevent mode.
 * Common worker for the legacy timer 0 and the per-cpu MSI timers.
 */
static void hpet_set_mode(enum clock_event_mode mode,
			  struct clock_event_device *evt, int timer)
{
	unsigned long cfg, cmp, now;
	uint64_t delta;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* delta = main counter ticks per tick period (1/HZ sec) */
		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
		delta >>= evt->shift;
		now = hpet_readl(HPET_COUNTER);
		cmp = now + (unsigned long) delta;
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
		       HPET_TN_SETVAL | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		/*
		 * The first write after writing TN_SETVAL to the
		 * config register sets the counter value, the second
		 * write sets the period.
		 */
		hpet_writel(cmp, HPET_Tn_CMP(timer));
		udelay(1);
		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_PERIODIC;
		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Disable the comparator interrupt */
		cfg = hpet_readl(HPET_Tn_CFG(timer));
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_Tn_CFG(timer));
		break;

	case CLOCK_EVT_MODE_RESUME:
		if (timer == 0) {
			hpet_enable_legacy_int();
		} else {
			/* Re-route the MSI timer and rebind it to its cpu */
			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
			hpet_setup_msi_irq(hdev->irq);
			disable_irq(hdev->irq);
			irq_set_affinity(hdev->irq, cpumask_of_cpu(hdev->cpu));
			enable_irq(hdev->irq);
		}
		break;
	}
}
310 
/*
 * Arm comparator @timer to fire @delta main counter ticks from now.
 *
 * Returns -ETIME when the counter had already passed the new
 * comparator value by the time it was written (delta too small or we
 * were delayed), so the clockevents core can retry.
 */
static int hpet_next_event(unsigned long delta,
			   struct clock_event_device *evt, int timer)
{
	u32 cnt;

	cnt = hpet_readl(HPET_COUNTER);
	cnt += (u32) delta;
	hpet_writel(cnt, HPET_Tn_CMP(timer));

	/*
	 * We need to read back the CMP register to make sure that
	 * what we wrote hit the chip before we compare it to the
	 * counter.
	 */
	WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt);

	/* The signed 32 bit difference handles counter wraparound */
	return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0;
}
329 
/* Mode switch for the legacy clockevent: always HPET comparator 0 */
static void hpet_legacy_set_mode(enum clock_event_mode mode,
			struct clock_event_device *evt)
{
	hpet_set_mode(mode, evt, 0);
}
335 
/* Reprogramming for the legacy clockevent: always HPET comparator 0 */
static int hpet_legacy_next_event(unsigned long delta,
			struct clock_event_device *evt)
{
	return hpet_next_event(delta, evt, 0);
}
341 
342 /*
343  * HPET MSI Support
344  */
345 #ifdef CONFIG_PCI_MSI
346 
347 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
348 static struct hpet_dev	*hpet_devs;
349 
/* irq_chip unmask: enable FSB (MSI) interrupt delivery for the timer */
void hpet_msi_unmask(unsigned int irq)
{
	struct hpet_dev *hdev = get_irq_data(irq);
	unsigned long cfg;

	/* unmask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg |= HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}
360 
/* irq_chip mask: disable FSB (MSI) interrupt delivery for the timer */
void hpet_msi_mask(unsigned int irq)
{
	unsigned long cfg;
	struct hpet_dev *hdev = get_irq_data(irq);

	/* mask it */
	cfg = hpet_readl(HPET_Tn_CFG(hdev->num));
	cfg &= ~HPET_TN_FSB;
	hpet_writel(cfg, HPET_Tn_CFG(hdev->num));
}
371 
/* Program the timer's FSB route register with the MSI data/address pair */
void hpet_msi_write(unsigned int irq, struct msi_msg *msg)
{
	struct hpet_dev *hdev = get_irq_data(irq);

	hpet_writel(msg->data, HPET_Tn_ROUTE(hdev->num));
	hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hdev->num) + 4);
}
379 
380 void hpet_msi_read(unsigned int irq, struct msi_msg *msg)
381 {
382 	struct hpet_dev *hdev = get_irq_data(irq);
383 
384 	msg->data = hpet_readl(HPET_Tn_ROUTE(hdev->num));
385 	msg->address_lo = hpet_readl(HPET_Tn_ROUTE(hdev->num) + 4);
386 	msg->address_hi = 0;
387 }
388 
389 static void hpet_msi_set_mode(enum clock_event_mode mode,
390 				struct clock_event_device *evt)
391 {
392 	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
393 	hpet_set_mode(mode, evt, hdev->num);
394 }
395 
396 static int hpet_msi_next_event(unsigned long delta,
397 				struct clock_event_device *evt)
398 {
399 	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
400 	return hpet_next_event(delta, evt, hdev->num);
401 }
402 
403 static int hpet_setup_msi_irq(unsigned int irq)
404 {
405 	if (arch_setup_hpet_msi(irq)) {
406 		destroy_irq(irq);
407 		return -EINVAL;
408 	}
409 	return 0;
410 }
411 
412 static int hpet_assign_irq(struct hpet_dev *dev)
413 {
414 	unsigned int irq;
415 
416 	irq = create_irq();
417 	if (!irq)
418 		return -EINVAL;
419 
420 	set_irq_data(irq, dev);
421 
422 	if (hpet_setup_msi_irq(irq))
423 		return -EINVAL;
424 
425 	dev->irq = irq;
426 	return 0;
427 }
428 
429 static irqreturn_t hpet_interrupt_handler(int irq, void *data)
430 {
431 	struct hpet_dev *dev = (struct hpet_dev *)data;
432 	struct clock_event_device *hevt = &dev->evt;
433 
434 	if (!hevt->event_handler) {
435 		printk(KERN_INFO "Spurious HPET timer interrupt on HPET timer %d\n",
436 				dev->num);
437 		return IRQ_HANDLED;
438 	}
439 
440 	hevt->event_handler(hevt);
441 	return IRQ_HANDLED;
442 }
443 
/*
 * Request the irq of an MSI timer and bind it to the timer's cpu.
 * The irq is kept disabled around the affinity change so it cannot
 * fire on the wrong cpu. Returns 0 on success, -1 on failure.
 */
static int hpet_setup_irq(struct hpet_dev *dev)
{

	if (request_irq(dev->irq, hpet_interrupt_handler,
			IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
		return -1;

	disable_irq(dev->irq);
	irq_set_affinity(dev->irq, cpumask_of_cpu(dev->cpu));
	enable_irq(dev->irq);

	printk(KERN_DEBUG "hpet: %s irq %d for MSI\n",
			 dev->name, dev->irq);

	return 0;
}
460 
461 /* This should be called in specific @cpu */
/*
 * Set up @hdev as the per-cpu clockevent device for @cpu.
 * This should be called on @cpu itself (see WARN_ON below), so the
 * irq affinity and clockevent registration happen on the right cpu.
 */
static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
{
	struct clock_event_device *evt = &hdev->evt;
	uint64_t hpet_freq;

	WARN_ON(cpu != smp_processor_id());
	if (!(hdev->flags & HPET_DEV_VALID))
		return;

	if (hpet_setup_msi_irq(hdev->irq))
		return;

	hdev->cpu = cpu;
	per_cpu(cpu_hpet_dev, cpu) = hdev;
	evt->name = hdev->name;
	hpet_setup_irq(hdev);
	evt->irq = hdev->irq;

	/* Higher rating than the legacy HPET clockevent (50) */
	evt->rating = 110;
	evt->features = CLOCK_EVT_FEAT_ONESHOT;
	if (hdev->flags & HPET_DEV_PERI_CAP)
		evt->features |= CLOCK_EVT_FEAT_PERIODIC;

	evt->set_mode = hpet_msi_set_mode;
	evt->set_next_event = hpet_msi_next_event;
	evt->shift = 32;

	/*
	 * The period is a femto seconds value. We need to calculate the
	 * scaled math multiplication factor for nanosecond to hpet tick
	 * conversion.
	 */
	hpet_freq = 1000000000000000ULL;
	do_div(hpet_freq, hpet_period);
	evt->mult = div_sc((unsigned long) hpet_freq,
				      NSEC_PER_SEC, evt->shift);
	/* Calculate the max delta */
	evt->max_delta_ns = clockevent_delta2ns(0x7FFFFFFF, evt);
	/* 5 usec minimum reprogramming delta. */
	evt->min_delta_ns = 5000;

	evt->cpumask = cpumask_of_cpu(hdev->cpu);
	clockevents_register_device(evt);
}
506 
507 #ifdef CONFIG_HPET
508 /* Reserve at least one timer for userspace (/dev/hpet) */
509 #define RESERVE_TIMERS 1
510 #else
511 #define RESERVE_TIMERS 0
512 #endif
513 
/*
 * Scan the comparators from @start_timer onwards for FSB (MSI)
 * capable ones and allocate a hpet_dev for each, up to one per
 * possible cpu. The last RESERVE_TIMERS comparators are left alone
 * for the /dev/hpet driver.
 */
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
	unsigned int id;
	unsigned int num_timers;
	unsigned int num_timers_used = 0;
	int i;

	id = hpet_readl(HPET_ID);

	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
	num_timers++; /* Value read out starts from 0 */

	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
	if (!hpet_devs)
		return;

	hpet_num_timers = num_timers;

	for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) {
		struct hpet_dev *hdev = &hpet_devs[num_timers_used];
		unsigned long cfg = hpet_readl(HPET_Tn_CFG(i));

		/* Only consider HPET timer with MSI support */
		if (!(cfg & HPET_TN_FSB_CAP))
			continue;

		hdev->flags = 0;
		if (cfg & HPET_TN_PERIODIC_CAP)
			hdev->flags |= HPET_DEV_PERI_CAP;
		hdev->num = i;

		sprintf(hdev->name, "hpet%d", i);
		if (hpet_assign_irq(hdev))
			continue;

		hdev->flags |= HPET_DEV_FSB_CAP;
		hdev->flags |= HPET_DEV_VALID;
		num_timers_used++;
		/* One per-cpu timer is enough */
		if (num_timers_used == num_possible_cpus())
			break;
	}

	printk(KERN_INFO "HPET: %d timers in total, %d timers will be used for per-cpu timer\n",
		num_timers, num_timers_used);
}
559 
560 #ifdef CONFIG_HPET
561 static void hpet_reserve_msi_timers(struct hpet_data *hd)
562 {
563 	int i;
564 
565 	if (!hpet_devs)
566 		return;
567 
568 	for (i = 0; i < hpet_num_timers; i++) {
569 		struct hpet_dev *hdev = &hpet_devs[i];
570 
571 		if (!(hdev->flags & HPET_DEV_VALID))
572 			continue;
573 
574 		hd->hd_irq[hdev->num] = hdev->irq;
575 		hpet_reserve_timer(hd, hdev->num);
576 	}
577 }
578 #endif
579 
580 static struct hpet_dev *hpet_get_unused_timer(void)
581 {
582 	int i;
583 
584 	if (!hpet_devs)
585 		return NULL;
586 
587 	for (i = 0; i < hpet_num_timers; i++) {
588 		struct hpet_dev *hdev = &hpet_devs[i];
589 
590 		if (!(hdev->flags & HPET_DEV_VALID))
591 			continue;
592 		if (test_and_set_bit(HPET_DEV_USED_BIT,
593 			(unsigned long *)&hdev->flags))
594 			continue;
595 		return hdev;
596 	}
597 	return NULL;
598 }
599 
/* Work item plus completion so hpet_work() can signal the hotplug path */
struct hpet_work_struct {
	struct delayed_work work;
	struct completion complete;
};
604 
/*
 * Runs on a freshly onlined cpu: grab an unused MSI timer, register
 * it as this cpu's clockevent device and signal the waiting hotplug
 * notifier via the completion.
 */
static void hpet_work(struct work_struct *w)
{
	struct hpet_dev *hdev;
	int cpu = smp_processor_id();
	struct hpet_work_struct *hpet_work;

	hpet_work = container_of(w, struct hpet_work_struct, work.work);

	hdev = hpet_get_unused_timer();
	if (hdev)
		init_one_hpet_msi_clockevent(hdev, cpu);

	complete(&hpet_work->complete);
}
619 
/*
 * CPU hotplug callback: hand out an MSI timer to an onlining cpu and
 * reclaim it when the cpu dies.
 */
static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	struct hpet_work_struct work;
	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);

	/* NOTE(review): & 0xf presumably strips modifier bits such as
	 * the frozen flag so suspend/resume transitions match too -- verify */
	switch (action & 0xf) {
	case CPU_ONLINE:
		INIT_DELAYED_WORK(&work.work, hpet_work);
		init_completion(&work.complete);
		/* FIXME: add schedule_work_on() */
		schedule_delayed_work_on(cpu, &work.work, 0);
		/* The work must run on @cpu, so wait for it here */
		wait_for_completion(&work.complete);
		break;
	case CPU_DEAD:
		if (hdev) {
			free_irq(hdev->irq, hdev);
			hdev->flags &= ~HPET_DEV_USED;
			per_cpu(cpu_hpet_dev, cpu) = NULL;
		}
		break;
	}
	return NOTIFY_OK;
}
645 #else
646 
/* !CONFIG_PCI_MSI stub: pretend success so callers need no #ifdefs */
static int hpet_setup_msi_irq(unsigned int irq)
{
	return 0;
}
/* !CONFIG_PCI_MSI stub: no MSI timers to discover */
static void hpet_msi_capability_lookup(unsigned int start_timer)
{
}
655 
656 #ifdef CONFIG_HPET
/* !CONFIG_PCI_MSI stub: no MSI timers, nothing to reserve */
static void hpet_reserve_msi_timers(struct hpet_data *hd)
{
}
661 #endif
662 
/* !CONFIG_PCI_MSI stub: nothing to do on cpu hotplug */
static int hpet_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	return NOTIFY_OK;
}
668 
669 #endif
670 
671 /*
672  * Clock source related code
673  */
674 static cycle_t read_hpet(void)
675 {
676 	return (cycle_t)hpet_readl(HPET_COUNTER);
677 }
678 
679 #ifdef CONFIG_X86_64
/*
 * Userspace vsyscall read of the main counter via the fixmap mapping.
 * 0xf0 is the main counter offset within the HPET register block
 * (NOTE(review): should match HPET_COUNTER - confirm against asm/hpet.h).
 */
static cycle_t __vsyscall_fn vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
684 #endif
685 
static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	/* Better than PIT/jiffies, below TSC (rating 300) */
	.rating		= 250,
	.read		= read_hpet,
	.mask		= HPET_MASK,
	.shift		= HPET_SHIFT,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= hpet_restart_counter,
#ifdef CONFIG_X86_64
	.vread		= vread_hpet,
#endif
};
698 
/*
 * Start the main counter, verify that it actually counts, compute the
 * mult factor and register the clocksource. Returns 0 on success,
 * -ENODEV when the counter does not advance.
 */
static int hpet_clocksource_register(void)
{
	u64 start, now;
	cycle_t t1;

	/* Start the counter */
	hpet_start_counter();

	/* Verify whether hpet counter works */
	t1 = read_hpet();
	rdtscll(start);

	/*
	 * We don't know the TSC frequency yet, but waiting for
	 * 200000 TSC cycles is safe:
	 * 4 GHz == 50us
	 * 1 GHz == 200us
	 */
	do {
		rep_nop();
		rdtscll(now);
	} while ((now - start) < 200000UL);

	if (t1 == read_hpet()) {
		printk(KERN_WARNING
		       "HPET counter not counting. HPET disabled\n");
		return -ENODEV;
	}

	/*
	 * The definition of mult is (include/linux/clocksource.h)
	 * mult/2^shift = ns/cyc and hpet_period is in units of fsec/cyc
	 * so we first need to convert hpet_period to ns/cyc units:
	 *  mult/2^shift = ns/cyc = hpet_period/10^6
	 *  mult = (hpet_period * 2^shift)/10^6
	 *  mult = (hpet_period << shift)/FSEC_PER_NSEC
	 */
	clocksource_hpet.mult = div_sc(hpet_period, FSEC_PER_NSEC, HPET_SHIFT);

	clocksource_register(&clocksource_hpet);

	return 0;
}
742 
743 /**
744  * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
745  */
/**
 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 *
 * Maps the HPET, sanity checks the period, registers the clocksource
 * and - when legacy replacement mode is supported - the legacy
 * clockevent. On any failure the mapping is torn down and the HPET is
 * marked disabled for the rest of the boot.
 */
int __init hpet_enable(void)
{
	unsigned long id;
	int i;

	if (!is_hpet_capable())
		return 0;

	hpet_set_mapping();

	/*
	 * Read the period and check for a sane value:
	 */
	hpet_period = hpet_readl(HPET_PERIOD);

	/*
	 * AMD SB700 based systems with spread spectrum enabled use a
	 * SMM based HPET emulation to provide proper frequency
	 * setting. The SMM code is initialized with the first HPET
	 * register access and takes some time to complete. During
	 * this time the config register reads 0xffffffff. We check
	 * for max. 1000 loops whether the config register reads a non
	 * 0xffffffff value to make sure that HPET is up and running
	 * before we go further. A counting loop is safe, as the HPET
	 * access takes thousands of CPU cycles. On non SB700 based
	 * machines this check is only done once and has no side
	 * effects.
	 */
	for (i = 0; hpet_readl(HPET_CFG) == 0xFFFFFFFF; i++) {
		if (i == 1000) {
			printk(KERN_WARNING
			       "HPET config register value = 0xFFFFFFFF. "
			       "Disabling HPET\n");
			goto out_nohpet;
		}
	}

	if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
		goto out_nohpet;

	/*
	 * Read the HPET ID register to retrieve the IRQ routing
	 * information and the number of channels
	 */
	id = hpet_readl(HPET_ID);

#ifdef CONFIG_HPET_EMULATE_RTC
	/*
	 * The legacy routing mode needs at least two channels, tick timer
	 * and the rtc emulation channel.
	 */
	if (!(id & HPET_ID_NUMBER))
		goto out_nohpet;
#endif

	if (hpet_clocksource_register())
		goto out_nohpet;

	if (id & HPET_ID_LEGSUP) {
		hpet_legacy_clockevent_register();
		/* Channels 0/1 are taken by the legacy mode */
		hpet_msi_capability_lookup(2);
		return 1;
	}
	hpet_msi_capability_lookup(0);
	return 0;

out_nohpet:
	hpet_clear_mapping();
	boot_hpet_disable = 1;
	return 0;
}
817 
818 /*
819  * Needs to be late, as the reserve_timer code calls kalloc !
820  *
821  * Not a problem on i386 as hpet_enable is called from late_time_init,
822  * but on x86_64 it is necessary !
823  */
/*
 * Late init: enable a chipset force-enabled HPET if necessary, hand
 * spare timers to /dev/hpet and set up the per-cpu MSI clockevents.
 */
static __init int hpet_late_init(void)
{
	int cpu;

	if (boot_hpet_disable)
		return -ENODEV;

	if (!hpet_address) {
		if (!force_hpet_address)
			return -ENODEV;

		/* HPET was force-enabled by a chipset quirk */
		hpet_address = force_hpet_address;
		hpet_enable();
		if (!hpet_virt_address)
			return -ENODEV;
	}

	hpet_reserve_platform_timers(hpet_readl(HPET_ID));

	/* Set up MSI clockevents for cpus that are already online */
	for_each_online_cpu(cpu) {
		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
	}

	/* This notifier should be called after workqueue is ready */
	hotcpu_notifier(hpet_cpuhp_notify, -20);

	return 0;
}
fs_initcall(hpet_late_init);
853 
/*
 * Shut the HPET down: leave legacy replacement mode (if it was
 * entered) and stop the main counter.
 */
void hpet_disable(void)
{
	if (is_hpet_capable()) {
		unsigned long cfg = hpet_readl(HPET_CFG);

		if (hpet_legacy_int_enabled) {
			cfg &= ~HPET_CFG_LEGACY;
			hpet_legacy_int_enabled = 0;
		}
		cfg &= ~HPET_CFG_ENABLE;
		hpet_writel(cfg, HPET_CFG);
	}
}
867 
868 #ifdef CONFIG_HPET_EMULATE_RTC
869 
870 /* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
871  * is enabled, we support RTC interrupt functionality in software.
872  * RTC has 3 kinds of interrupts:
873  * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
874  *    is updated
875  * 2) Alarm Interrupt - generate an interrupt at a specific time of day
876  * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
877  *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
878  * (1) and (2) above are implemented using polling at a frequency of
879  * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
880  * overhead. (DEFAULT_RTC_INT_FREQ)
881  * For (3), we use interrupts at 64Hz or user specified periodic
882  * frequency, whichever is higher.
883  */
884 #include <linux/mc146818rtc.h>
885 #include <linux/rtc.h>
886 #include <asm/rtc.h>
887 
888 #define DEFAULT_RTC_INT_FREQ	64
889 #define DEFAULT_RTC_SHIFT	6
890 #define RTC_NUM_INTS		1
891 
892 static unsigned long hpet_rtc_flags;
893 static int hpet_prev_update_sec;
894 static struct rtc_time hpet_alarm_time;
895 static unsigned long hpet_pie_count;
896 static unsigned long hpet_t1_cmp;
897 static unsigned long hpet_default_delta;
898 static unsigned long hpet_pie_delta;
899 static unsigned long hpet_pie_limit;
900 
901 static rtc_irq_handler irq_handler;
902 
903 /*
904  * Registers a IRQ handler.
905  */
906 int hpet_register_irq_handler(rtc_irq_handler handler)
907 {
908 	if (!is_hpet_enabled())
909 		return -ENODEV;
910 	if (irq_handler)
911 		return -EBUSY;
912 
913 	irq_handler = handler;
914 
915 	return 0;
916 }
917 EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
918 
919 /*
920  * Deregisters the IRQ handler registered with hpet_register_irq_handler()
921  * and does cleanup.
922  */
/*
 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
 * and does cleanup (clears all emulated RTC interrupt enables).
 */
void hpet_unregister_irq_handler(rtc_irq_handler handler)
{
	if (!is_hpet_enabled())
		return;

	irq_handler = NULL;
	hpet_rtc_flags = 0;
}
EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
932 
933 /*
934  * Timer 1 for RTC emulation. We use one shot mode, as periodic mode
935  * is not supported by all HPET implementations for timer 1.
936  *
937  * hpet_rtc_timer_init() is called when the rtc is initialized.
938  */
/*
 * Arm timer 1 (one shot) for the next emulated RTC interrupt.
 * Returns 0 when the HPET is not in use, 1 otherwise.
 */
int hpet_rtc_timer_init(void)
{
	unsigned long cfg, cnt, delta, flags;

	if (!is_hpet_enabled())
		return 0;

	/* Lazily compute the delta for the default 64Hz polling rate */
	if (!hpet_default_delta) {
		uint64_t clc;

		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
		clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT;
		hpet_default_delta = (unsigned long) clc;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	local_irq_save(flags);

	cnt = delta + hpet_readl(HPET_COUNTER);
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	local_irq_restore(flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
975 
976 /*
977  * The functions below are called from rtc driver.
978  * Return 0 if HPET is not being used.
979  * Otherwise do the necessary changes and return 1.
980  */
981 int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
982 {
983 	if (!is_hpet_enabled())
984 		return 0;
985 
986 	hpet_rtc_flags &= ~bit_mask;
987 	return 1;
988 }
989 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
990 
/*
 * Enable the given emulated RTC interrupt sources. Restarts the
 * update-second tracking when UIE is newly enabled and kicks the
 * emulation timer when it was completely idle before.
 */
int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	unsigned long oldbits = hpet_rtc_flags;

	if (!is_hpet_enabled())
		return 0;

	hpet_rtc_flags |= bit_mask;

	/* -1 forces the first update interrupt to be suppressed */
	if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
		hpet_prev_update_sec = -1;

	if (!oldbits)
		hpet_rtc_timer_init();

	return 1;
}
EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
1009 
1010 int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
1011 			unsigned char sec)
1012 {
1013 	if (!is_hpet_enabled())
1014 		return 0;
1015 
1016 	hpet_alarm_time.tm_hour = hrs;
1017 	hpet_alarm_time.tm_min = min;
1018 	hpet_alarm_time.tm_sec = sec;
1019 
1020 	return 1;
1021 }
1022 EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
1023 
1024 int hpet_set_periodic_freq(unsigned long freq)
1025 {
1026 	uint64_t clc;
1027 
1028 	if (!is_hpet_enabled())
1029 		return 0;
1030 
1031 	if (freq <= DEFAULT_RTC_INT_FREQ)
1032 		hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
1033 	else {
1034 		clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC;
1035 		do_div(clc, freq);
1036 		clc >>= hpet_clockevent.shift;
1037 		hpet_pie_delta = (unsigned long) clc;
1038 	}
1039 	return 1;
1040 }
1041 EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
1042 
/* Polled by the rtc driver: emulated interrupts count as never dropped */
int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
1048 
/*
 * Re-arm timer 1 for the next emulated RTC interrupt, or stop it when
 * no RTC interrupt source is enabled anymore. Accounts for interrupts
 * that were missed because the comparator already lay in the past.
 */
static void hpet_rtc_timer_reinit(void)
{
	unsigned long cfg, delta;
	/* -1: the first loop iteration is the normal re-arm, not a miss */
	int lost_ints = -1;

	if (unlikely(!hpet_rtc_flags)) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
		delta = hpet_default_delta;
	else
		delta = hpet_pie_delta;

	/*
	 * Increment the comparator value until we are ahead of the
	 * current count.
	 */
	do {
		hpet_t1_cmp += delta;
		hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
		lost_ints++;
	} while ((long)(hpet_readl(HPET_COUNTER) - hpet_t1_cmp) > 0);

	if (lost_ints) {
		if (hpet_rtc_flags & RTC_PIE)
			hpet_pie_count += lost_ints;
		if (printk_ratelimit())
			printk(KERN_WARNING "hpet1: lost %d rtc interrupts\n",
				lost_ints);
	}
}
1084 
/*
 * Timer 1 interrupt: emulate the RTC update/alarm/periodic interrupts
 * and forward them to the registered rtc handler with the matching
 * RTC_UF/RTC_AF/RTC_PF flags set.
 */
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;

	hpet_rtc_timer_reinit();
	memset(&curr_time, 0, sizeof(struct rtc_time));

	/* Wall clock time is only needed for update and alarm checks */
	if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
		get_rtc_time(&curr_time);

	if (hpet_rtc_flags & RTC_UIE &&
	    curr_time.tm_sec != hpet_prev_update_sec) {
		/* hpet_prev_update_sec < 0 suppresses the very first event */
		if (hpet_prev_update_sec >= 0)
			rtc_int_flag = RTC_UF;
		hpet_prev_update_sec = curr_time.tm_sec;
	}

	if (hpet_rtc_flags & RTC_PIE &&
	    ++hpet_pie_count >= hpet_pie_limit) {
		rtc_int_flag |= RTC_PF;
		hpet_pie_count = 0;
	}

	if (hpet_rtc_flags & RTC_AIE &&
	    (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
	    (curr_time.tm_min == hpet_alarm_time.tm_min) &&
	    (curr_time.tm_hour == hpet_alarm_time.tm_hour))
			rtc_int_flag |= RTC_AF;

	if (rtc_int_flag) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		if (irq_handler)
			irq_handler(rtc_int_flag, dev_id);
	}
	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
1123 #endif
1124