/*
 * arch/xtensa/kernel/coprocessor.S
 *
 * Xtensa processor configuration-specific table of coprocessor and
 * other custom register layout information.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2007 Tensilica Inc.
 */


#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

/*
 * IO protection is currently unsupported: any exception routed here is
 * treated as fatal and handed to unrecoverable_exception.
 */

ENTRY(fast_io_protect)

	wsr	a0, excsave1		# free a0; old value parked in EXCSAVE1
	movi	a0, unrecoverable_exception
	callx0	a0			# does not return

ENDPROC(fast_io_protect)

#if XTENSA_HAVE_COPROCESSORS

/*
 * Macros for lazy context switch.
 */

/*
 * SAVE_CP_REGS(x)/LOAD_CP_REGS(x) emit a small store/load stub for
 * coprocessor x.  Each stub expects:
 *   a2: address of the context save area
 *   a0: return address (every stub ends in 'jx a0')
 * a4..a7 are free temporaries for the xchal_cp* HAL macros.
 */

#define SAVE_CP_REGS(x)							\
	.align 4;							\
	.Lsave_cp_regs_cp##x:						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		xchal_cp##x##_store a2 a4 a5 a6 a7;			\
	.endif;								\
	jx	a0

/*
 * Jump-table entry for coprocessor x: 8 bytes per entry, laid out as
 *   .long	stub offset relative to the jump table (0 if CP absent)
 *   .long	THREAD_XTREGS_CP<x> (save-area offset in thread_info)
 * Because each entry is 8 bytes wide, callers must index the tables
 * with addx8 (CP number * 8).
 */
#define SAVE_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table;	\
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x


#define LOAD_CP_REGS(x)							\
	.align 4;							\
	.Lload_cp_regs_cp##x:						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		xchal_cp##x##_load a2 a4 a5 a6 a7;			\
	.endif;								\
	jx	a0

#define LOAD_CP_REGS_TAB(x)						\
	.if XTENSA_HAVE_COPROCESSOR(x);					\
		.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table;	\
	.else;								\
		.long 0;						\
	.endif;								\
	.long THREAD_XTREGS_CP##x

	SAVE_CP_REGS(0)
	SAVE_CP_REGS(1)
	SAVE_CP_REGS(2)
	SAVE_CP_REGS(3)
	SAVE_CP_REGS(4)
	SAVE_CP_REGS(5)
	SAVE_CP_REGS(6)
	SAVE_CP_REGS(7)

	LOAD_CP_REGS(0)
	LOAD_CP_REGS(1)
	LOAD_CP_REGS(2)
	LOAD_CP_REGS(3)
	LOAD_CP_REGS(4)
	LOAD_CP_REGS(5)
	LOAD_CP_REGS(6)
	LOAD_CP_REGS(7)

	.align 4
.Lsave_cp_regs_jump_table:
	SAVE_CP_REGS_TAB(0)
	SAVE_CP_REGS_TAB(1)
	SAVE_CP_REGS_TAB(2)
	SAVE_CP_REGS_TAB(3)
	SAVE_CP_REGS_TAB(4)
	SAVE_CP_REGS_TAB(5)
	SAVE_CP_REGS_TAB(6)
	SAVE_CP_REGS_TAB(7)

.Lload_cp_regs_jump_table:
	LOAD_CP_REGS_TAB(0)
	LOAD_CP_REGS_TAB(1)
	LOAD_CP_REGS_TAB(2)
	LOAD_CP_REGS_TAB(3)
	LOAD_CP_REGS_TAB(4)
	LOAD_CP_REGS_TAB(5)
	LOAD_CP_REGS_TAB(6)
	LOAD_CP_REGS_TAB(7)

/*
 * coprocessor_save(buffer, index)
 *                    a2      a3
 * coprocessor_load(buffer, index)
 *                    a2      a3
 *
 * Save or load coprocessor registers for coprocessor 'index'.
 * The register values are saved to or loaded from the 'buffer' address.
 *
 * Note that these functions don't update the coprocessor_owner information!
141 * 142 */ 143 144ENTRY(coprocessor_save) 145 146 entry a1, 32 147 s32i a0, a1, 0 148 movi a0, .Lsave_cp_regs_jump_table 149 addx8 a3, a3, a0 150 l32i a3, a3, 0 151 beqz a3, 1f 152 add a0, a0, a3 153 callx0 a0 1541: l32i a0, a1, 0 155 retw 156 157ENDPROC(coprocessor_save) 158 159ENTRY(coprocessor_load) 160 161 entry a1, 32 162 s32i a0, a1, 0 163 movi a0, .Lload_cp_regs_jump_table 164 addx4 a3, a3, a0 165 l32i a3, a3, 0 166 beqz a3, 1f 167 add a0, a0, a3 168 callx0 a0 1691: l32i a0, a1, 0 170 retw 171 172ENDPROC(coprocessor_load) 173 174/* 175 * coprocessor_flush(struct task_info*, index) 176 * a2 a3 177 * coprocessor_restore(struct task_info*, index) 178 * a2 a3 179 * 180 * Save or load coprocessor registers for coprocessor 'index'. 181 * The register values are saved to or loaded from the coprocessor area 182 * inside the task_info structure. 183 * 184 * Note that these functions don't update the coprocessor_owner information! 185 * 186 */ 187 188 189ENTRY(coprocessor_flush) 190 191 entry a1, 32 192 s32i a0, a1, 0 193 movi a0, .Lsave_cp_regs_jump_table 194 addx8 a3, a3, a0 195 l32i a4, a3, 4 196 l32i a3, a3, 0 197 add a2, a2, a4 198 beqz a3, 1f 199 add a0, a0, a3 200 callx0 a0 2011: l32i a0, a1, 0 202 retw 203 204ENDPROC(coprocessor_flush) 205 206ENTRY(coprocessor_restore) 207 entry a1, 32 208 s32i a0, a1, 0 209 movi a0, .Lload_cp_regs_jump_table 210 addx4 a3, a3, a0 211 l32i a4, a3, 4 212 l32i a3, a3, 0 213 add a2, a2, a4 214 beqz a3, 1f 215 add a0, a0, a3 216 callx0 a0 2171: l32i a0, a1, 0 218 retw 219 220ENDPROC(coprocessor_restore) 221 222/* 223 * Entry condition: 224 * 225 * a0: trashed, original value saved on stack (PT_AREG0) 226 * a1: a1 227 * a2: new stack pointer, original in DEPC 228 * a3: dispatch table 229 * depc: a2, original value saved on stack (PT_DEPC) 230 * excsave_1: a3 231 * 232 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 233 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 234 */ 235 
ENTRY(fast_coprocessor_double)

	/*
	 * A coprocessor-disabled exception raised while already handling a
	 * double exception cannot be recovered; report and stop.
	 */
	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0			# does not return

ENDPROC(fast_coprocessor_double)

ENTRY(fast_coprocessor)

	/* Save remaining registers a1-a3 and SAR */

	xsr	a3, excsave1		# restore a3, park old a0... a0 already saved
	s32i	a3, a2, PT_AREG3
	rsr	a3, sar
	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_SAR
	mov	a1, a2			# a1: exception stack frame from here on
	rsr	a2, depc		# original a2 was stashed in DEPC on entry
	s32i	a2, a1, PT_AREG2

	/*
	 * The hal macros require up to 4 temporary registers. We use a3..a6.
	 */

	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a3, exccause
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/

	ssl	a3			# SAR: 32 - coprocessor_number
	movi	a2, 1
	rsr	a0, cpenable
	sll	a2, a2			# a2: 1 << cp-index
	or	a0, a0, a2
	wsr	a0, cpenable
	rsync

	/* Retrieve previous owner. (a3 still holds CP number) */

	movi	a0, coprocessor_owner	# list of owners (4-byte slot per CP)
	addx4	a0, a3, a0		# entry for CP
	l32i	a4, a0, 0

	beqz	a4, 1f			# skip 'save' if no previous owner

	/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */

	l32i	a5, a4, THREAD_CPENABLE
	xor	a5, a5, a2		# (1 << cp-id) still in a2
	s32i	a5, a4, THREAD_CPENABLE

	/*
	 * Get context save area and 'call' save routine.
	 * (a4 still holds previous owner (thread_info), a3 CP number)
	 */

	movi	a5, .Lsave_cp_regs_jump_table
	movi	a0, 2f			# a0: 'return' address for the stub
	addx8	a3, a3, a5		# a3: coprocessor number (8 bytes/entry)
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: save area of previous owner
	add	a4, a3, a5		# a4: address of save routine
	jx	a4

	/* Note that only a0 and a1 were preserved.
	 */

2:	rsr	a3, exccause
	addi	a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED	# a3: CP number again
	movi	a0, coprocessor_owner
	addx4	a0, a3, a0		# a0: owner slot for this CP

	/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */

1:	GET_THREAD_INFO (a4, a1)
	s32i	a4, a0, 0		# current thread_info becomes the owner

	/* Get context save area and 'call' load routine. */

	movi	a5, .Lload_cp_regs_jump_table
	movi	a0, 1f			# a0: 'return' address for the stub
	addx8	a3, a3, a5		# 8 bytes per table entry
	l32i	a2, a3, 4		# a2: xtregs offset
	l32i	a3, a3, 0		# a3: jump offset
	add	a2, a2, a4		# a2: save area of the new owner
	add	a4, a3, a5		# a4: address of load routine
	jx	a4

	/* Restore all registers and return from exception handler. */

1:	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4

	l32i	a0, a1, PT_SAR
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	wsr	a0, sar
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1

	rfe

ENDPROC(fast_coprocessor)

	.data

/* One 4-byte slot per coprocessor: thread_info of the current CP owner. */
ENTRY(coprocessor_owner)

	.fill XCHAL_CP_MAX, 4, 0

END(coprocessor_owner)

#endif /* XTENSA_HAVE_COPROCESSORS */
