--- cevt-r4k.c (c1f3ee120bb61045b1c0a3ead620d1d65af47130)
+++ cevt-r4k.c (8531a35e5e275b17c57c39b7911bc2b37025f28c)
 /*
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 2007 MIPS Technologies, Inc.
  * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
  */
 #include <linux/clockchips.h>
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
 
+/*
+ * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
+
 static int mips_next_event(unsigned long delta,
                            struct clock_event_device *evt)
 {
         unsigned int cnt;
         int res;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-        {
-        unsigned long flags, vpflags;
-        local_irq_save(flags);
-        vpflags = dvpe();
-#endif
         cnt = read_c0_count();
         cnt += delta;
         write_c0_compare(cnt);
         res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-        evpe(vpflags);
-        local_irq_restore(flags);
-        }
-#endif
         return res;
 }
 
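A note on the expiry test kept above: both the -ETIME check in mips_next_event() and the probing loop in c0_compare_int_usable() further down compare Count against Compare by subtracting the two 32-bit values and looking at the sign of the difference, which stays correct across counter wrap-around as long as the real distance is under 2^31 ticks. A minimal stand-alone sketch of that idiom (plain user-space C; nothing here is kernel code, and deadline_passed() is a made-up name):

#include <stdint.h>
#include <stdio.h>

/* Nonzero once the free-running counter 'now' has passed 'deadline',
 * even if the counter wrapped in between; same trick as
 * (int)(read_c0_count() - cnt) > 0 in mips_next_event(). */
static int deadline_passed(uint32_t now, uint32_t deadline)
{
        return (int32_t)(now - deadline) > 0;
}

int main(void)
{
        /* Program a deadline 0x20 ticks past 0xFFFFFFF0; it wraps to 0x10. */
        uint32_t deadline = 0xFFFFFFF0u + 0x20u;

        printf("%d\n", deadline_passed(0xFFFFFFF8u, deadline)); /* 0: still pending */
        printf("%d\n", deadline_passed(0x00000011u, deadline)); /* 1: already expired */
        return 0;
}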
-static void mips_set_mode(enum clock_event_mode mode,
-                          struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+                         struct clock_event_device *evt)
 {
         /* Nothing to do ... */
 }
 
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
 
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-        write_c0_compare(read_c0_compare());
-}
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-        /*
-         * The performance counter overflow interrupt may be shared with the
-         * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-         * and we can't reliably determine if a counter interrupt has also
-         * happened (!r2) then don't check for a timer interrupt.
-         */
-        return (cp0_perfcount_irq < 0) &&
-               perf_irq() == IRQ_HANDLED &&
-               !r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+#ifndef CONFIG_MIPS_MT_SMTC
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
         const int r2 = cpu_has_mips_r2;
         struct clock_event_device *cd;
         int cpu = smp_processor_id();
 
         /*
          * Suckage alert:
          * Before R2 of the architecture there was no way to see if a
          * performance counter interrupt was pending, so we have to run
          * the performance counter interrupt handler anyway.
          */
         if (handle_perf_irq(r2))
                 goto out;
 
         /*
          * The same applies to performance counter interrupts. But with the
          * above we now know that the reason we got here must be a timer
          * interrupt. Being the paranoiacs we are we check anyway.
          */
         if (!r2 || (read_c0_cause() & (1 << 30))) {
-                c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-                if (cpu_data[cpu].vpe_id)
-                        goto out;
-                cpu = 0;
-#endif
+                /* Clear Count/Compare Interrupt */
+                write_c0_compare(read_c0_compare());
                 cd = &per_cpu(mips_clockevent_device, cpu);
                 cd->event_handler(cd);
         }
 
 out:
         return IRQ_HANDLED;
 }
 
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
         .handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-        .flags = IRQF_DISABLED,
-#else
         .flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
         .name = "timer",
 };
 
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
 
-static void smtc_set_mode(enum clock_event_mode mode,
-                          struct clock_event_device *evt)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
 
-static void mips_broadcast(cpumask_t mask)
-{
-        unsigned int cpu;
-
-        for_each_cpu_mask(cpu, mask)
-                smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-        //uint64_t mips_freq = mips_hpt_frequency;
-        unsigned int cpu = smp_processor_id();
-        struct clock_event_device *cd;
-
-        cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-        cd->name = "SMTC";
-        cd->features = CLOCK_EVT_FEAT_DUMMY;
-
-        /* Calculate the min / max delta */
-        cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-        cd->shift = 0; //32;
-        cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
-        cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
-        cd->rating = 200;
-        cd->irq = 17; //-1;
-//      if (cpu)
-//              cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//      else
-        cd->cpumask = cpumask_of_cpu(cpu);
-
-        cd->set_mode = smtc_set_mode;
-
-        cd->broadcast = mips_broadcast;
-
-        clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
-{
-}
-
 /*
  * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
  */
 static int c0_compare_int_pending(void)
 {
         return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }
 
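c0_compare_int_pending() keys off the interrupt-pending field of the CP0 Cause register, and the handler above additionally tests Cause bit 30 on R2 cores. A small illustrative sketch of what the two predicates decode to; the CAUSE_IP()/CAUSE_TI names and sample values below are mine, not taken from this file, and cp0_compare_irq is shown as 7 to match the "IP7" wording in the probe routine:

#include <stdio.h>

/* Illustrative bit positions only. In the MIPS Cause register the
 * IP0..IP7 pending bits sit in bits 8..15, and on R2 cores bit 30 (TI)
 * reports a pending Count/Compare interrupt. */
#define CAUSE_IP(n)     (0x100u << (n))
#define CAUSE_TI        (1u << 30)

int main(void)
{
        unsigned int cp0_compare_irq = 7;               /* Compare on IP7 */
        unsigned int cause = CAUSE_TI | CAUSE_IP(cp0_compare_irq);

        /* Same test as c0_compare_int_pending(): bit (8 + irq) of Cause. */
        printf("pending: %s\n", (cause >> cp0_compare_irq) & 0x100 ? "yes" : "no");

        /* Same test as the handler's "!r2 || (cause & (1 << 30))" path. */
        printf("TI:      %s\n", cause & CAUSE_TI ? "yes" : "no");
        return 0;
}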
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register. 4 time the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+        do { \
+                irq_disable_hazard(); \
+                irq_disable_hazard(); \
+                irq_disable_hazard(); \
+                irq_disable_hazard(); \
+        } while (0)
+
-static int c0_compare_int_usable(void)
+int c0_compare_int_usable(void)
 {
         unsigned int delta;
         unsigned int cnt;
 
         /*
          * IP7 already pending? Try to clear it by acking the timer.
          */
         if (c0_compare_int_pending()) {
                 write_c0_compare(read_c0_count());
-                irq_disable_hazard();
+                compare_change_hazard();
                 if (c0_compare_int_pending())
                         return 0;
         }
 
         for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
                 cnt = read_c0_count();
                 cnt += delta;
                 write_c0_compare(cnt);
-                irq_disable_hazard();
+                compare_change_hazard();
                 if ((int)(read_c0_count() - cnt) < 0)
                         break;
                 /* increase delta if the timer was already expired */
         }
 
         while ((int)(read_c0_count() - cnt) <= 0)
                 ;       /* Wait for expiry */
 
+        compare_change_hazard();
         if (!c0_compare_int_pending())
                 return 0;
 
         write_c0_compare(read_c0_count());
-        irq_disable_hazard();
+        compare_change_hazard();
         if (c0_compare_int_pending())
                 return 0;
 
         /*
          * Feels like a real count / compare timer.
          */
         return 1;
 }
 
+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
         uint64_t mips_freq = mips_hpt_frequency;
         unsigned int cpu = smp_processor_id();
         struct clock_event_device *cd;
         unsigned int irq;
 
         if (!cpu_has_counter || !mips_hpt_frequency)
                 return -ENXIO;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-        setup_smtc_dummy_clockevent_device();
-
-        /*
-         * On SMTC we only register VPE0's compare interrupt as clockevent
-         * device.
-         */
-        if (cpu)
-                return 0;
-#endif
-
         if (!c0_compare_int_usable())
                 return -ENXIO;
 
         /*
          * With vectored interrupts things are getting platform specific.
          * get_c0_compare_int is a hook to allow a platform to return the
          * interrupt number of it's liking.
          */

[... 9 unchanged lines hidden in the source listing ...]

         /* Calculate the min / max delta */
         cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
         cd->shift = 32;
         cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
         cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
 
         cd->rating = 300;
         cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-        cd->cpumask = CPU_MASK_ALL;
-#else
         cd->cpumask = cpumask_of_cpu(cpu);
-#endif
         cd->set_next_event = mips_next_event;
-        cd->set_mode = mips_set_mode;
+        cd->set_mode = mips_set_clock_mode;
         cd->event_handler = mips_event_handler;
 
         clockevents_register_device(cd);
 
         if (cp0_timer_irq_installed)
                 return 0;
 
         cp0_timer_irq_installed = 1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-        setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
         setup_irq(irq, &c0_compare_irqaction);
-#endif
 
         return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
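For a sense of what the mult/shift pair registered in mips_clockevent_init() does: the clockevents core converts a requested delay in nanoseconds into the Count delta handed to mips_next_event() by multiplying by mult and shifting right by shift. The sketch below redoes that arithmetic in user space for an assumed 100 MHz Count clock; calc_mult() and ns_to_ticks() are hypothetical stand-ins mirroring div_sc(mips_freq, NSEC_PER_SEC, 32) and the core's delta * mult >> shift step, not the kernel helpers themselves:

#include <stdint.h>
#include <stdio.h>

/* mult = (freq << 32) / 1e9, the same scaling div_sc() produces here. */
static uint32_t calc_mult(uint64_t freq_hz)
{
        return (uint32_t)((freq_hz << 32) / 1000000000ULL);
}

/* Nanoseconds -> Count ticks, as done before ->set_next_event() is called. */
static uint32_t ns_to_ticks(uint64_t ns, uint32_t mult, unsigned int shift)
{
        return (uint32_t)((ns * mult) >> shift);
}

int main(void)
{
        uint64_t freq = 100000000ULL;   /* assumed 100 MHz CP0 Count clock */
        uint32_t mult = calc_mult(freq);

        printf("mult      = %u\n", mult);
        /* A 1 ms tick comes out at roughly freq / 1000, i.e. ~100000 cycles. */
        printf("1 ms tick = %u Count cycles\n", ns_to_ticks(1000000ULL, mult, 32));
        return 0;
}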
|