xref: /linux/arch/mips/kernel/i8253.c (revision 1ea6428cbdbb0fd1a6e7e8c3044253eab9aff4c7)
/*
 * i8253.c  8253/PIT functions
 *
 */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <asm/delay.h>
#include <asm/i8253.h>
#include <asm/io.h>
#include <asm/time.h>

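/*
 * The PIT I/O ports are shared with other users of the chip (the PC
 * speaker driver drives channel 2, for example), which is presumably
 * why the lock is exported rather than kept file-local.
 */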
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);

/*
 * Initialize the PIT timer.
 *
 * This is also called after resume to bring the PIT into operation again.
 */
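/*
 * For reference, the 8253/8254 control words used below encode the
 * channel in bits 7-6, the access mode in bits 5-4 (0x30 = LSB then
 * MSB) and the counting mode in bits 3-1: 0x34 programs channel 0 as
 * a rate generator (mode 2), 0x30 switches it to mode 0 (interrupt on
 * terminal count) for shutdown, and 0x38 selects mode 4 for the
 * one-shot case.  LATCH is the number of PIT ticks per jiffy,
 * (CLOCK_TICK_RATE + HZ/2) / HZ, from <linux/jiffies.h>.
 */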
static void init_pit_timer(enum clock_event_mode mode,
			   struct clock_event_device *evt)
{
	spin_lock(&i8253_lock);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* binary, mode 2, LSB/MSB, ch 0 */
		outb_p(0x34, PIT_MODE);
		outb_p(LATCH & 0xff, PIT_CH0);	/* LSB */
		outb(LATCH >> 8, PIT_CH0);	/* MSB */
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
		    evt->mode == CLOCK_EVT_MODE_ONESHOT) {
			outb_p(0x30, PIT_MODE);
			outb_p(0, PIT_CH0);
			outb_p(0, PIT_CH0);
		}
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		/* One shot setup */
		outb_p(0x38, PIT_MODE);
		break;

	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do here */
		break;
	}
	spin_unlock(&i8253_lock);
}

/*
 * Program the next event in oneshot mode
 *
 * Delta is given in PIT ticks
 */
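/*
 * The count register is only 16 bits wide, so delta must fit in two
 * bytes; setup_pit_timer() below caps it at 0x7FFF when computing the
 * clockevent max delta.
 */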
static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
{
	spin_lock(&i8253_lock);
	outb_p(delta & 0xff, PIT_CH0);	/* LSB */
	outb(delta >> 8, PIT_CH0);	/* MSB */
	spin_unlock(&i8253_lock);

	return 0;
}

/*
 * On UP the PIT can serve all of the possible timer functions. On SMP systems
 * it can be solely used for the global tick.
 *
 * The profiling and update capabilities are switched off once the local APIC
 * is registered. This mechanism replaces the previous #ifdef LOCAL_APIC -
 * !using_apic_timer decisions in do_timer_interrupt_hook()
 */
static struct clock_event_device pit_clockevent = {
	.name		= "pit",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= init_pit_timer,
	.set_next_event = pit_next_event,
	.irq		= 0,
};

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	pit_clockevent.event_handler(&pit_clockevent);

	return IRQ_HANDLED;
}

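/*
 * The PIT interrupt arrives on ISA IRQ 0.  IRQF_DISABLED runs the
 * handler with interrupts disabled, and IRQF_NOBALANCING excludes the
 * interrupt from IRQ balancing so it stays on the CPU it was set up on.
 */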
static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED | IRQF_NOBALANCING,
	.mask = CPU_MASK_NONE,
	.name = "timer"
};

/*
 * Initialize the conversion factor and the min/max deltas of the clock event
 * structure and register the clock event source with the framework.
 */
void __init setup_pit_timer(void)
{
	struct clock_event_device *cd = &pit_clockevent;
	unsigned int cpu = smp_processor_id();

	/*
	 * Start pit with the boot cpu mask and make it global after the
	 * IO_APIC has been initialized.
	 */
	cd->cpumask = cpumask_of_cpu(cpu);
	clockevent_set_clock(cd, CLOCK_TICK_RATE);
	cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
	cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
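	/*
	 * With the usual 1.193182 MHz PIT input clock the 0x7FFF-tick
	 * maximum works out to roughly 27.5 ms and the 0xF-tick minimum
	 * to about 12.6 us (CLOCK_TICK_RATE is platform defined, so the
	 * exact values may differ).
	 */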
	clockevents_register_device(cd);

	irq0.mask = cpumask_of_cpu(cpu);
	setup_irq(0, &irq0);
}

/*
 * Since the PIT overflows every tick, it's not very useful
 * to just read by itself. So use jiffies to emulate a free
 * running counter:
 */
static cycle_t pit_read(void)
{
	unsigned long flags;
	int count;
	u32 jifs;
	static int old_count;
	static u32 old_jifs;

	spin_lock_irqsave(&i8253_lock, flags);
	/*
	 * Although our caller may have the read side of xtime_lock,
	 * this is now a seqlock, and we are cheating in this routine
	 * by having side effects on state that we cannot undo if
	 * there is a collision on the seqlock and our caller has to
	 * retry.  (Namely, old_jifs and old_count.)  So we must treat
	 * jiffies as volatile despite the lock.  We read jiffies
	 * before latching the timer count to guarantee that although
	 * the jiffies value might be older than the count (that is,
	 * the counter may underflow between the last point where
	 * jiffies was incremented and the point where we latch the
	 * count), it cannot be newer.
	 */
	jifs = jiffies;
	outb_p(0x00, PIT_MODE);	/* latch the count ASAP */
	count = inb_p(PIT_CH0);	/* read the latched count */
	count |= inb_p(PIT_CH0) << 8;

	/* VIA686a test code... reset the latch if count > max + 1 */
	if (count > LATCH) {
		outb_p(0x34, PIT_MODE);
		outb_p(LATCH & 0xff, PIT_CH0);
		outb(LATCH >> 8, PIT_CH0);
		count = LATCH - 1;
	}

	/*
	 * It's possible for count to appear to go the wrong way for a
	 * couple of reasons:
	 *
	 *  1. The timer counter underflows, but we haven't handled the
	 *     resulting interrupt and incremented jiffies yet.
	 *  2. Hardware problem with the timer, not giving us continuous time,
	 *     the counter does small "jumps" upwards on some Pentium systems
	 *     (see c't 95/10 page 335 for the Neptun bug).
	 *
	 * Previous attempts to handle these cases intelligently were
	 * buggy, so we just do the simple thing now.
	 */
	if (count > old_count && jifs == old_jifs) {
		count = old_count;
	}
	old_count = count;
	old_jifs = jifs;

	spin_unlock_irqrestore(&i8253_lock, flags);

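	/*
	 * In mode 2 the counter counts down from LATCH, so
	 * (LATCH - 1) - count approximates the number of ticks elapsed
	 * in the current jiffy; adding jifs * LATCH turns that into a
	 * free-running PIT-tick counter (modulo the 32-bit wrap that the
	 * clocksource mask accounts for).
	 */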
	count = (LATCH - 1) - count;

	return (cycle_t)(jifs * LATCH) + count;
}

static struct clocksource clocksource_pit = {
	.name	= "pit",
	.rating = 110,
	.read	= pit_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.mult	= 0,
	.shift	= 20,
};

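/*
 * The clocksource framework converts cycles to nanoseconds as
 * (cycles * mult) >> shift; mult is left at 0 above and filled in by
 * init_pit_clocksource() via clocksource_hz2mult(CLOCK_TICK_RATE, 20),
 * matching the fixed shift of 20.
 */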
static int __init init_pit_clocksource(void)
{
	if (num_possible_cpus() > 1) /* PIT does not scale! */
		return 0;

	clocksource_pit.mult = clocksource_hz2mult(CLOCK_TICK_RATE, 20);
	return clocksource_register(&clocksource_pit);
}
arch_initcall(init_pit_clocksource);