/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/*
 * Low-level entry code for the C-SKY architecture (GNU as syntax):
 *
 *  - software TLB-refill / page-fault fast paths (tlbop_begin / tlbop_end
 *    macros, instantiated three times below for read-miss, write-miss and
 *    modified faults);
 *  - system-call entry (csky_systemcall) plus its ptrace slow path;
 *  - kernel return paths: ret_from_kernel_thread, ret_from_fork,
 *    ret_from_exception with signal/reschedule work handling;
 *  - trap entry (csky_trap), the "get TLS" fast trap (csky_get_tls),
 *    IRQ entry (csky_irq) and the context switch primitive (__switch_to).
 *
 * Helper macros used throughout (SAVE_ALL/RESTORE_ALL, RD_MEH, RD_PGDR,
 * RD_PGDR_K, WR_MCIR, USPTOKSP/KSPTOUSP, INCTRAP, ANDI_R3, and the register
 * aliases "syscallid" and "r11_sig") come from <abi/entry.h> and differ
 * between ABI v1 and v2 — see that header for the exact expansions.
 */

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>		/* NOTE(review): duplicate include (already pulled in above) */
#include <asm/page.h>
#include <asm/thread_info.h>

/*
 * Software page-table walk parameters: 2-level tables, 4 KiB pages.
 * PTE_INDX_MSK/PTE_INDX_SHIFT extract the byte offset of the 4-byte PTE
 * inside a 1024-entry PTE page; _PGDIR_SHIFT selects the PGD index.
 */
#define PTE_INDX_MSK    0xffc
#define PTE_INDX_SHIFT  10
#define _PGDIR_SHIFT    22

/*
 * tlbop_begin — common front half of a TLB exception handler.
 *
 *   name  suffix for the entry symbol (csky_\name) and the slow-path label
 *   val0  extra _PAGE_* permission bit that must be present alongside
 *         _PAGE_PRESENT for the fast path to apply
 *   val1, val2
 *         PTE flag bits to set on the fast path (valid/accessed for reads,
 *         dirty/modified for writes)
 *
 * Fast path: walk the page tables by hand, and if the PTE is present with
 * the required permission, just set the bookkeeping bits and return with
 * rte — no C code involved.  Otherwise fall through to the \name label,
 * which restores the scratched registers and enters the normal C page
 * fault path (completed by the tlbop_end macro).
 *
 * Registers a2, a3 and r6 are used as scratch; they are preserved in the
 * supervisor scratch control registers ss2-ss4 so the fast path can return
 * with all GPRs intact.
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr	a3, ss2			/* stash a3/r6/a2 in scratch CRs */
	mtcr	r6, ss3
	mtcr	a2, ss4

	RD_PGDR	r6			/* r6 = current PGD base */
	RD_MEH	a3			/* a3 = MEH: faulting virtual address (+ASID bits) */
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3			/* invalidate stale entry for this VA, all ASIDs */
	sync.is

	btsti	a3, 31			/* kernel-half address (bit 31 set)? */
	bf	1f
	RD_PGDR_K r6			/* yes: walk the kernel PGD instead */
1:
#else
	/* No TLBI: poke MCIR to invalidate/probe via the legacy interface. */
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
	bclri	r6, 0			/* clear low flag bit of the PGD value */
	lrw	a2, PHYS_OFFSET
	subu	r6, a2			/* phys -> kernel virtual: subtract PHYS_OFFSET... */
	bseti	r6, 31			/* ...and set bit 31 (direct-mapped kernel segment) */

	mov	a2, a3
	lsri	a2, _PGDIR_SHIFT	/* PGD index from the fault address */
	lsli	a2, 2			/* *4: each PGD entry is one word */
	addu	r6, a2
	ldw	r6, (r6)		/* r6 = PGD entry = phys addr of PTE page */

	lrw	a2, PHYS_OFFSET
	subu	r6, a2			/* same phys -> virt conversion for the PTE page */
	bseti	r6, 31

	lsri	a3, PTE_INDX_SHIFT	/* byte offset of the PTE within the page */
	lrw	a2, PTE_INDX_MSK
	and	a3, a2
	addu	r6, a3
	ldw	a3, (r6)		/* a3 = the PTE itself */

	movi	a2, (_PAGE_PRESENT | \val0)
	and	a3, a2
	cmpne	a3, a2			/* both bits set? */
	bt	\name			/* no: take the C page-fault slow path */

	/* First read/write the page, just update the flags */
	ldw	a3, (r6)
	bgeni	a2, PAGE_VALID_BIT
	bseti	a2, PAGE_ACCESSED_BIT
	bseti	a2, \val1
	bseti	a2, \val2
	or	a3, a2
	stw	a3, (r6)		/* write the updated PTE back */

	/* Some cpu tlb-hardrefill bypass the cache */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	/* Flush the cache line holding the PTE so the hardware refill sees it
	 * (cr22 = address, cr17 = cache operation; values per CPU manual). */
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22
	mtcr	a2, cr17
	sync
#endif

	mfcr	a3, ss2			/* restore scratched GPRs and return */
	mfcr	r6, ss3
	mfcr	a2, ss4
	rte
\name:
	/* Slow path: restore GPRs, then build a full pt_regs frame. */
	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	SAVE_ALL EPC_KEEP
.endm

/*
 * tlbop_end — common back half: call the C fault handler.
 *   is_write  1 for write faults, 0 for read faults (2nd arg of do_page_fault)
 */
.macro tlbop_end is_write
	RD_MEH	a2			/* a2 = faulting address for do_page_fault */
	psrset	ee, ie			/* re-enable exceptions + interrupts */
	mov	a0, sp			/* a0 = pt_regs */
	movi	a1, \is_write
	jbsr	do_page_fault
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	jmpi	ret_from_exception
.endm

.text

/* Read miss: require _PAGE_READ; mark valid + accessed. */
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

/* Write miss: require _PAGE_WRITE; mark dirty + modified. */
tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

/* Write to a clean page: same as a write miss, plus (on CPUs without
 * ld.ex/st.ex) fix up a cmpxchg emulation sequence that may have been
 * interrupted by this fault. */
tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

/*
 * System-call entry.  syscallid (register alias from <abi/entry.h>) holds
 * the syscall number; arguments arrive in a0-a3 (+ r6/r7 on ABI v1, or on
 * the stack slots built below for ABI v2).
 */
ENTRY(csky_systemcall)
	SAVE_ALL EPC_INCREASE		/* frame + skip the trap insn on return */

	psrset	ee, ie			/* re-enable exceptions + interrupts */

	lrw	r11, __NR_syscalls
	cmphs	syscallid, r11		/* Check nr of syscall */
	bt	ret_from_exception	/* out of range: return (regs untouched) */

	lrw	r13, sys_call_table
	ixw	r13, syscallid		/* r13 = &sys_call_table[syscallid] */
	ldw	r11, (r13)		/* r11 = handler */
	cmpnei	r11, 0
	bf	ret_from_exception	/* NULL entry: nothing to call */

	/* thread_info = sp & ~(THREAD_SIZE - 1); check the trace flags. */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r8, (r9, TINFO_FLAGS)
	ANDI_R3	r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
	cmpnei	r8, 0
	bt	csky_syscall_trace	/* traced: take the ptrace slow path */
#if defined(__CSKYABIV2__)
	/* ABI v2 passes syscall args 5/6 (r4/r5) on the stack. */
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr	r11			/* Do system call */
	addi	sp, 8
#else
	jsr	r11
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */
	jmpi	ret_from_exception

/*
 * Traced system call: notify the tracer before and after, reloading the
 * (possibly tracer-modified) arguments from the pt_regs frame in between.
 */
csky_syscall_trace:
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_enter
	/* Prepare args before do system call */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi	sp, 8			/* args 5/6 on the stack, as above */
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
#else
	ldw	r6, (sp, LSAVE_A4)	/* ABI v1: args 5/6 in r6/r7 */
	ldw	r7, (sp, LSAVE_A5)
#endif
	jsr	r11			/* Do system call */
#if defined(__CSKYABIV2__)
	addi	sp, 8
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

	mov	a0, sp			/* right now, sp --> pt_regs */
	jbsr	syscall_trace_exit
	br	ret_from_exception

/*
 * First schedule of a kernel thread: r9 = thread function, r8 = its
 * argument (set up by copy_thread).
 */
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r8
	jsr	r9
	jbsr	ret_from_exception

/*
 * First schedule of a forked user task.  r11_sig = 1 marks "returning from
 * a syscall" for the signal code; report syscall exit to a tracer if any
 * of the trace flags are set.
 */
ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* r9 = thread_info */
	ldw	r8, (r9, TINFO_FLAGS)
	movi	r11_sig, 1		/* fork returns through the syscall path */
	ANDI_R3	r8, (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
	cmpnei	r8, 0
	bf	3f
	mov	a0, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace_exit
3:
	jbsr	ret_from_exception

/*
 * Common exception return.  If we are returning to kernel mode (bit 31 of
 * the saved PSR — checked below), restore immediately; otherwise loop on
 * the thread_info work flags (signals / resched) until they are clear.
 */
ret_from_exception:
	ld	syscallid, (sp, LSAVE_PSR)	/* syscallid reused as scratch here */
	btsti	syscallid, 31
	bt	1f			/* returning to kernel: no work checks */

	/*
	 * Load address of current->thread_info, Then get address of task_struct
	 * Get task_needreshed in task_struct
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10

resume_userspace:
	ldw	r8, (r9, TINFO_FLAGS)
	andi	r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
	cmpnei	r8, 0
	bt	exit_work		/* pending work: handle before returning */
1:	RESTORE_ALL

exit_work:
	btsti	r8, TIF_NEED_RESCHED
	bt	work_resched
	/* If thread_info->flag is empty, RESTORE_ALL */
	cmpnei	r8, 0
	bf	1b
	mov	a1, sp			/* a1 = pt_regs */
	mov	a0, r8			/* a0 = work flags */
	mov	a2, r11_sig		/* syscall? */
	btsti	r8, TIF_SIGPENDING	/* delivering a signal? */
	/* prevent further restarts(set r11 = 0) */
	clrt	r11_sig
	jbsr	do_notify_resume	/* do signals */
	br	resume_userspace	/* re-check flags afterwards */

work_resched:
	/* Tail-call schedule() with the return address pointing back at
	 * ret_from_exception, so we re-run the work checks afterwards. */
	lrw	syscallid, ret_from_exception
	mov	r15, syscallid		/* Return address in link */
	jmpi	schedule

/* rt_sigreturn must not be restarted: clear the "in syscall" marker. */
ENTRY(sys_rt_sigreturn)
	movi	r11_sig, 0
	jmpi	do_rt_sigreturn

/* Generic trap entry: build pt_regs and hand off to the C trap handler. */
ENTRY(csky_trap)
	SAVE_ALL EPC_KEEP
	psrset	ee
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	mov	a0, sp			/* Push Stack pointer arg */
	jbsr	trap_c			/* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 *
 * Fast trap: return current's TLS pointer in a0 without building a full
 * exception frame.
 */
ENTRY(csky_get_tls)
	USPTOKSP			/* switch from user to kernel stack pointer */

	/* increase epc for continue */
	mfcr	a0, epc
	INCTRAP	a0			/* step epc past the trap instruction */
	mtcr	a0, epc

	/* get current task thread_info with kernel 8K stack */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1			/* sp-1 handles sp == exact stack top;    */
	and	a0, sp			/* a0 = sp & ~(THREAD_SIZE-1)             */
	addi	sp, 1			/* sp itself is left unchanged overall    */

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP			/* back to the user stack pointer */
	rte

/*
 * IRQ entry.  Under CONFIG_PREEMPT the preempt count in thread_info is
 * bumped around the handler, and preempt_schedule_irq() is invoked when
 * the count drops to zero with TIF_NEED_RESCHED set.
 */
ENTRY(csky_irq)
	SAVE_ALL EPC_KEEP
	psrset	ee
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */

#ifdef CONFIG_PREEMPT
	mov	r9, sp			/* Get current stack pointer */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* Get thread_info */

	/*
	 * Get task_struct->stack.preempt_count for current,
	 * and increase 1.
	 */
	ldw	r8, (r9, TINFO_PREEMPT)
	addi	r8, 1
	stw	r8, (r9, TINFO_PREEMPT)
#endif

	mov	a0, sp			/* a0 = pt_regs */
	jbsr	csky_do_IRQ

#ifdef CONFIG_PREEMPT
	subi	r8, 1			/* drop the preempt count again */
	stw	r8, (r9, TINFO_PREEMPT)
	cmpnei	r8, 0
	bt	2f			/* still non-preemptible */
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_NEED_RESCHED
	bf	2f			/* no resched requested */
1:
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
	ldw	r7, (r9, TINFO_FLAGS)	/* get new tasks TI_FLAGS */
	btsti	r7, TIF_NEED_RESCHED
	bt	1b			/* go again */
#endif
2:
	jmpi	ret_from_exception

/*
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 *
 * Save prev's PSR, callee-saved registers (SAVE_SWITCH_STACK) and kernel
 * sp into prev->thread; load the same from next->thread; on ABI v2 also
 * install next's TLS register.  Interrupts are disabled for the switch.
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0			/* a3 = &prev->thread */

	mfcr	a2, psr			/* Save PSR value */
	stw	a2, (a3, THREAD_SR)	/* Save PSR in task struct */
	bclri	a2, 6			/* Disable interrupts */
	mtcr	a2, psr

	SAVE_SWITCH_STACK		/* push callee-saved regs on prev's stack */

	stw	sp, (a3, THREAD_KSP)	/* remember prev's kernel sp */

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1			/* a3 = &next->thread */

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

	ldw	a2, (a3, THREAD_SR)	/* Set next PSR */
	mtcr	a2, psr

#if defined(__CSKYABIV2__)
	addi	r7, a1, TASK_THREAD_INFO
	ldw	tls, (r7, TINFO_TP_VALUE)	/* install next's TLS register */
#endif

	RESTORE_SWITCH_STACK		/* pop next's callee-saved regs */

	rts				/* return into next's context; a0 = next */
ENDPROC(__switch_to)