/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>

/*
 * The SMTC Kernel for the 34K, 1004K, et al. replaces several
 * of these routines with SMTC-specific variants.
 */

#ifndef CONFIG_MIPS_MT_SMTC

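/*
 * Arm the next event by setting Compare = Count + delta.  Re-reading
 * Count afterwards catches the case where the deadline had already
 * passed by the time Compare was written; returning -ETIME lets the
 * clockevents core retry with a larger delta.
 */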
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
	return res;
}

#endif /* CONFIG_MIPS_MT_SMTC */

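/*
 * The Count/Compare pair free-runs and has no periodic mode to program,
 * so mode changes are a no-op; accordingly, the registration code below
 * advertises only CLOCK_EVT_FEAT_ONESHOT.
 */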
void mips_set_clock_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	/* Nothing to do ...  */
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

#ifndef CONFIG_MIPS_MT_SMTC

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to timer interrupts: before R2 there is no
	 * way to tell whether one is pending.  But with the perf counter
	 * handled above, the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are, on R2 we check the
	 * Cause.TI bit anyway.
	 */
	if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */

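/*
 * A single irqaction is shared by all CPUs: IRQF_PERCPU marks the line
 * as a per-CPU interrupt, IRQF_TIMER flags it as the timer tick to the
 * IRQ core, and IRQF_DISABLED runs the handler with interrupts off.
 */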
struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	.flags = IRQF_DISABLED | IRQF_PERCPU | IRQF_TIMER,
	.name = "timer",
};

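/*
 * Dummy initial handler, installed at registration time so that a stray
 * Count/Compare interrupt arriving before the clockevents core takes
 * over the device does not call through a NULL event_handler.
 */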
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
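/*
 * Cause.IP0..IP7 occupy bits 8..15 (CAUSEB_IP upwards).  The platform
 * code is expected to set cp0_compare_irq_shift so that, after the
 * shift, the timer's pending bit lines up with bit CAUSEB_IP; on a
 * standard core with the timer on IP7 that means a shift of 7,
 * testing Cause bit 15.
 */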
static int c0_compare_int_pending(void)
{
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * Compare interrupt can be routed and latched outside the core,
 * so a single execution hazard barrier may not be enough to give
 * it time to clear as seen in the Cause register.  4 times the
 * pipeline depth seems reasonably conservative, and empirically
 * works better in configurations with high CPU/bus clock ratios.
 */

#define compare_change_hazard() \
	do { \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
		irq_disable_hazard(); \
	} while (0)

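/*
 * Probe whether the Count/Compare interrupt actually works:
 *
 *  1. If it is already pending, try to ack it by writing Compare; if
 *     it will not clear, the line is unusable.
 *  2. Arm Compare a growing number of ticks (0x10 up to 0x400000)
 *     into the future until the deadline is observably ahead of Count.
 *  3. Busy-wait for Count to pass Compare and insist the interrupt is
 *     now seen as pending in Cause.
 *  4. Ack it again and insist it clears.
 *
 * Only then do we trust the timer enough to register a clockevent.
 */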
int c0_compare_int_usable(void)
{
	unsigned int delta;
	unsigned int cnt;

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		write_c0_compare(read_c0_count());
		compare_change_hazard();
		if (c0_compare_int_pending())
			return 0;
	}

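	/*
	 * Note the (int)(read_c0_count() - cnt) tests below: the signed
	 * 32-bit difference keeps the before/after comparison correct
	 * even if the Count register wraps during the probe.
	 */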
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		compare_change_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer had already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry  */

	compare_change_hazard();
	if (!c0_compare_int_pending())
		return 0;

	write_c0_compare(read_c0_count());
	compare_change_hazard();
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

#ifndef CONFIG_MIPS_MT_SMTC

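/*
 * Register the Count/Compare pair as a one-shot clock event device for
 * the calling CPU: bail out if there is no counter or no usable compare
 * interrupt, pick the IRQ (platform hook first), fill in the conversion
 * factors, and install the shared timer irqaction exactly once.
 */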
int __cpuinit r4k_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things get platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the conversion factors and the min / max deltas */
	cd->mult		= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
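	/*
	 * With shift = 32, mult is the ticks-per-nanosecond ratio scaled
	 * by 2^32, so the clockevents core converts a delta_ns into
	 * Count ticks as (delta_ns * mult) >> 32, and the delta2ns
	 * bounds go the other way.  For example, at an assumed 100 MHz
	 * mips_hpt_frequency, mult = (10^8 << 32) / 10^9 ~= 429496729,
	 * a 1 ms deadline programs ~100000 ticks, and the 0x7fffffff
	 * tick ceiling works out to roughly 21.5 s of maximum deadline.
	 */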

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}

#endif /* Not CONFIG_MIPS_MT_SMTC */