/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>

#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);

void (*vfp_vector)(void) = vfp_testing_entry;
union vfp_state *last_VFP_context[NR_CPUS];

/*
 * Dual-use variable.
 * During startup: set to non-zero if the VFP checks fail.
 * After startup: holds the VFP architecture version.
 */
unsigned int VFP_arch;

static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	union vfp_state *vfp;
	__u32 cpu = thread->cpu;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_ENABLE) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration: force a reload of the state on
		 * the new CPU in case the VFP registers contain
		 * stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore
		 * the old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_ENABLE);
		return NOTIFY_DONE;
	}

	vfp = &thread->vfpstate;
	if (cmd == THREAD_NOTIFY_FLUSH) {
		/*
		 * Per-thread VFP initialisation.
		 */
		memset(vfp, 0, sizeof(union vfp_state));

		vfp->hard.fpexc = FPEXC_ENABLE;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE);
	}

	/* Flush and release case: per-thread VFP cleanup. */
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call = vfp_notifier,
};
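/*
 * Illustrative sketch, not part of this file's build: the
 * THREAD_NOTIFY_SWITCH path above implements lazy FPU switching by
 * merely disabling VFP at context switch time; the expensive
 * save/reload is deferred until the next thread actually executes a
 * VFP instruction and traps. The hypothetical user-space model below
 * (owner, fpu_enabled, switch_to and fpu_trap are invented names)
 * shows the same two-step flow.
 */
#if 0
#include <stdio.h>

struct fpu_state { int s0; };		/* stand-in for union vfp_state */

static struct fpu_state *owner;		/* cf. last_VFP_context[cpu] */
static int fpu_enabled;			/* cf. FPEXC_ENABLE */

/* Context switch: O(1); just turn the unit off, touch no FP registers. */
static void switch_to(void)
{
	fpu_enabled = 0;
}

/* First FP use by the new thread traps; pay for the switch only now. */
static void fpu_trap(struct fpu_state *curr)
{
	if (owner != curr) {
		/* here the kernel would save owner's regs and load curr's */
		owner = curr;
	}
	fpu_enabled = 1;
}

int main(void)
{
	struct fpu_state a = { 1 }, b = { 2 };

	owner = &a;
	switch_to();		/* cheap: nothing saved or restored yet */
	fpu_trap(&b);		/* lazy reload happens on first use */
	printf("owner s0 = %d, enabled = %d\n", owner->s0, fpu_enabled);
	return 0;
}
#endif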
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for.
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

static void vfp_panic(char *reason)
{
	int i;

	printk(KERN_ERR "VFP: Error: %s\n", reason);
	printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), fmrx(FPINST));
	for (i = 0; i < 32; i += 2)
		printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i + 1, vfp_get_float(i + 1));
}

/*
 * Process a bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce");
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat, en, sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction can not appear in FPINST2,
			 * nor can it cause an exception. Therefore, we do
			 * not have to emulate it.
			 */
		}
	} else {
		/*
		 * A CPDT instruction can not appear in FPINST2, nor can
		 * it cause an exception. Therefore, we do not have to
		 * emulate it.
		 */
	}
	return exceptions & ~VFP_NAN_FLAG;
}
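/*
 * Illustrative sketch, not part of this file's build: in
 * vfp_raise_exceptions() above, the RAISE() lines run from lowest to
 * highest priority and each match overwrites si_code, so the
 * highest-priority trapped exception wins. The user-space rendering
 * below uses the standard FPSCR bit positions (cumulative flags in
 * bits 0-4, trap enables in bits 8-12); pick_si_code and the numeric
 * stand-ins for the FPE_* codes are invented for the example.
 */
#if 0
#include <stdio.h>

#define IOC (1 << 0)	/* invalid operation (cumulative) */
#define DZC (1 << 1)	/* division by zero (cumulative) */
#define OFC (1 << 2)	/* overflow (cumulative) */
#define IOE (1 << 8)	/* invalid operation trap enable */
#define DZE (1 << 9)	/* division by zero trap enable */
#define OFE (1 << 10)	/* overflow trap enable */

static int pick_si_code(unsigned int exceptions, unsigned int fpscr)
{
	int si_code = 0;

	/* lowest priority first; a later match overwrites an earlier one */
	if ((exceptions & DZC) && (fpscr & DZE))
		si_code = 1;	/* stands in for FPE_FLTDIV */
	if ((exceptions & OFC) && (fpscr & OFE))
		si_code = 2;	/* stands in for FPE_FLTOVF */
	if ((exceptions & IOC) && (fpscr & IOE))
		si_code = 3;	/* stands in for FPE_FLTINV */
	return si_code;
}

int main(void)
{
	/* both divide-by-zero and invalid raised: invalid wins */
	printf("%d\n", pick_si_code(DZC | IOC, DZE | IOE));	/* prints 3 */
	return 0;
}
#endif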
/*
 * Package up a bounce condition.
 */
void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
{
	u32 fpscr, orig_fpscr, exceptions, inst;

	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);

	/*
	 * Enable access to the VFP so we can handle the bounce.
	 */
	fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC));

	orig_fpscr = fpscr = fmrx(FPSCR);

	/*
	 * If we are running with inexact exceptions enabled, we need to
	 * emulate the trigger instruction. Note that as we're emulating
	 * the trigger instruction, we need to increment PC.
	 */
	if (fpscr & FPSCR_IXE) {
		regs->ARM_pc += 4;
		goto emulate;
	}

	barrier();

	/*
	 * Modify fpscr to indicate the number of iterations remaining.
	 */
	if (fpexc & FPEXC_EXCEPTION) {
		u32 len;

		len = fpexc + (1 << FPEXC_LENGTH_BIT);

		fpscr &= ~FPSCR_LENGTH_MASK;
		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	}

	/*
	 * Handle the first FP instruction. We used to take note of the
	 * FPEXC bounce reason, but this appears to be unreliable.
	 * Emulate the bounced instruction instead.
	 */
	inst = fmrx(FPINST);
	exceptions = vfp_emulate_instruction(inst, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, inst, orig_fpscr, regs);

	/*
	 * If there isn't a second FP instruction, exit now.
	 */
	if (!(fpexc & FPEXC_FPV2))
		return;

	/*
	 * The barrier() here prevents fpinst2 being read
	 * before the condition above.
	 */
	barrier();
	trigger = fmrx(FPINST2);
	orig_fpscr = fpscr = fmrx(FPSCR);

 emulate:
	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
	if (exceptions)
		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}

static void vfp_enable(void *unused)
{
	u32 access = get_copro_access();

	/*
	 * Enable full access to VFP (cp10 and cp11).
	 */
	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
}

/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();
	u32 access = 0;

	if (cpu_arch >= CPU_ARCH_ARMv6) {
		access = get_copro_access();

		/*
		 * Enable full access to VFP (cp10 and cp11).
		 */
		set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
	}

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfpsid = fmrx(FPSID);
	barrier();

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch) {
		printk("not present\n");

		/*
		 * Restore the copro access register.
		 */
		if (cpu_arch >= CPU_ARCH_ARMv6)
			set_copro_access(access);
	} else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		smp_call_function(vfp_enable, NULL, 1, 1);

		/* Extract the architecture version. */
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
	}
	return 0;
}

late_initcall(vfp_init);
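/*
 * Illustrative sketch, not part of this file's build: how vfp_init()
 * above decodes FPSID. The field positions follow the ARM VFP FPSID
 * layout (implementer [31:24], single-precision-only bit 20,
 * architecture [19:16], part [15:8], variant [7:4], revision [3:0]);
 * the sample register value is made up for demonstration.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int fpsid = 0x410101a4;	/* hypothetical FPSID value */

	printf("implementor %02x architecture %d part %02x variant %x rev %x\n",
	       (fpsid >> 24) & 0xff,	/* cf. FPSID_IMPLEMENTER_MASK/BIT */
	       (fpsid >> 16) & 0xf,	/* cf. FPSID_ARCH_MASK/BIT */
	       (fpsid >> 8) & 0xff,	/* cf. FPSID_PART_MASK/BIT */
	       (fpsid >> 4) & 0xf,	/* cf. FPSID_VARIANT_MASK/BIT */
	       fpsid & 0xf);		/* cf. FPSID_REV_MASK/BIT */
	return 0;
}
#endif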