/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

/*
 * Low-level entry code for C-SKY: TLB miss/modified fast paths and the
 * system-call entry.  GNU as syntax; comments are C-style throughout.
 */

#include <linux/linkage.h>
#include <abi/entry.h>
#include <abi/pgtable-bits.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
#include <asm/setup.h>		/* NOTE(review): duplicate include, already pulled in above */
#include <asm/page.h>
#include <asm/thread_info.h>

/* Two-level page-table geometry: 4 MiB pgd entries, 1024 4-byte ptes/page */
#define PTE_INDX_MSK	0xffc	/* pte index bits [11:2] of the fault address */
#define PTE_INDX_SHIFT	10
#define _PGDIR_SHIFT	22

/*
 * tlbop_begin name, val0, val1, val2
 *
 * Common head of the TLB exception fast paths.  a3/r6/a2 are parked in
 * the scratch control registers ss2/ss3/ss4 so the fast path can return
 * with "rte" without building a pt_regs frame.  The page table is walked
 * by hand:
 *   r6 = pgd base (RD_PGDR), a3 = faulting virtual address (RD_MEH),
 *   then r6 -> pgd entry -> pte page -> address of the pte, a3 = pte.
 * If the pte already has (_PAGE_PRESENT | \val0) set, only the software
 * status bits \val1/\val2 (+ valid/accessed) are ORed into the pte and
 * the handler returns; otherwise control falls through to \name: which
 * restores the scratch registers, saves full state, and lets tlbop_end
 * hand the fault to do_page_fault().
 */
.macro tlbop_begin name, val0, val1, val2
ENTRY(csky_\name)
	mtcr	a3, ss2			/* park a3/r6/a2 in scratch CRs */
	mtcr	r6, ss3
	mtcr	a2, ss4

	RD_PGDR	r6			/* r6 = current pgd base */
	RD_MEH	a3			/* a3 = faulting virtual address */
#ifdef CONFIG_CPU_HAS_TLBI
	tlbi.vaas a3			/* drop any stale entry for this VA */
	sync.is

	btsti	a3, 31			/* kernel half of the address space? */
	bf	1f
	RD_PGDR_K r6			/* yes: walk the kernel pgd instead */
1:
#else
	/* No TLBI: invalidate via the MCIR control register instead.
	 * NOTE(review): bit meanings of MCIR writes (bits 31/25) come from
	 * the C-SKY MMU spec -- confirm against abi/entry.h. */
	bgeni	a2, 31
	WR_MCIR	a2
	bgeni	a2, 25
	WR_MCIR	a2
#endif
	bclri	r6, 0			/* clear low flag bit of the pgd value */
	lrw	a2, PHYS_OFFSET
	subu	r6, a2			/* phys -> offset from RAM base ... */
	bseti	r6, 31			/* ... -> direct-mapped kernel VA (0x8xxxxxxx) */

	mov	a2, a3
	lsri	a2, _PGDIR_SHIFT	/* pgd index of the fault address */
	lsli	a2, 2			/* *4: pgd entries are one word */
	addu	r6, a2
	ldw	r6, (r6)		/* r6 = pgd entry = phys addr of pte page */

	lrw	a2, PHYS_OFFSET
	subu	r6, a2			/* pte page: phys -> kernel VA, as above */
	bseti	r6, 31

	lsri	a3, PTE_INDX_SHIFT
	lrw	a2, PTE_INDX_MSK
	and	a3, a2			/* byte offset of the pte in its page */
	addu	r6, a3			/* r6 = &pte */
	ldw	a3, (r6)		/* a3 = pte */

	movi	a2, (_PAGE_PRESENT | \val0)
	and	a3, a2
	cmpne	a3, a2			/* both bits set? if not -> real fault */
	bt	\name

	/* First read/write of a present page: just update the pte flags */
	ldw	a3, (r6)
	bgeni	a2, PAGE_VALID_BIT
	bseti	a2, PAGE_ACCESSED_BIT
	bseti	a2, \val1
	bseti	a2, \val2
	or	a3, a2
	stw	a3, (r6)

	/* Some CPUs' TLB hardware-refill bypasses the cache: push the
	 * updated pte line out so the refill sees it. */
#ifdef CONFIG_CPU_NEED_TLBSYNC
	movi	a2, 0x22
	bseti	a2, 6
	mtcr	r6, cr22		/* cr22 = address, cr17 = cache op */
	mtcr	a2, cr17
	sync
#endif

	mfcr	a3, ss2			/* fast path done: restore and return */
	mfcr	r6, ss3
	mfcr	a2, ss4
	rte
\name:					/* slow path: need do_page_fault() */
	mfcr	a3, ss2
	mfcr	r6, ss3
	mfcr	a2, ss4
	SAVE_ALL EPC_KEEP		/* build pt_regs, keep the faulting epc */
.endm

/*
 * tlbop_end is_write
 *
 * Tail of the slow path: call do_page_fault(regs, is_write) with
 * interrupts/exceptions enabled, then take the common exception-return
 * path.  r11_sig = 0 marks "not a syscall" for the return code.
 */
.macro tlbop_end is_write
	RD_MEH	a2
	psrset	ee, ie			/* re-enable exceptions + interrupts */
	mov	a0, sp			/* arg0 = pt_regs */
	movi	a1, \is_write		/* arg1 = write fault? */
	jbsr	do_page_fault
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	jmpi	ret_from_exception
.endm

.text

/* Read miss: require _PAGE_READ, mark valid + accessed */
tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
tlbop_end 0

/* Write miss: require _PAGE_WRITE, mark dirty + modified */
tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
tlbop_end 1

/* Write to a clean page: same bits as a write miss */
tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
#ifndef CONFIG_CPU_HAS_LDSTEX
/* CPUs without ll/sc: a fault inside the cmpxchg trampoline must be
 * repaired before the generic fault handling runs. */
jbsr csky_cmpxchg_fixup
#endif
tlbop_end 1

/*
 * System-call entry.  syscallid/r11/r12/r13 are scratch here; the saved
 * user state lives in the pt_regs built by SAVE_ALL at the top of the
 * kernel stack.  EPC_INCREASE skips the trap instruction on return.
 */
ENTRY(csky_systemcall)
	SAVE_ALL EPC_INCREASE

	psrset	ee, ie			/* run syscalls with interrupts on */

	/* Record sp in the per-thread slot that set_esp0 maintains.
	 * NOTE(review): the 0x220 offset (bgeni 9 -> 0x200, + 32) into the
	 * 8 KiB-aligned stack area must match set_esp0() -- confirm there. */
	mov	r12, sp

	bmaski	r11, 13			/* mask = 0x1fff: 8 KiB stack area */
	andn	r12, r11		/* r12 = base of thread stack area */
	bgeni	r11, 9
	addi	r11, 32			/* r11 = 0x220 */
	addu	r12, r11
	st	sp, (r12, 0)

	lrw	r11, __NR_syscalls
	cmphs	syscallid, r11		/* Check nr of syscall */
	bt	ret_from_exception	/* out of range: return unchanged */

	lrw	r13, sys_call_table
	ixw	r13, syscallid		/* r13 = &sys_call_table[id] (word-indexed) */
	ldw	r11, (r13)		/* r11 = handler address */
	cmpnei	r11, 0
	bf	ret_from_exception	/* NULL entry: nothing to call */

	/* r9 = current thread_info (kernel sp masked to THREAD_SIZE) */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_SYSCALL_TRACE
	bt	1f			/* being traced: take the slow path */
#if defined(__CSKYABIV2__)
	/* ABIv2 passes args 5/6 on the stack: push r5/r4 for the handler */
	subi	sp, 8
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
	jsr	r11			/* Do system call */
	addi	sp, 8
#else
	jsr	r11
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value into pt_regs */
	jmpi	ret_from_exception

1:	/* Traced syscall: notify the tracer before and after the call */
	movi	a0, 0			/* enter system call */
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace
	/* Reload args from pt_regs -- the tracer may have changed them */
	ldw	a0, (sp, LSAVE_A0)
	ldw	a1, (sp, LSAVE_A1)
	ldw	a2, (sp, LSAVE_A2)
	ldw	a3, (sp, LSAVE_A3)
#if defined(__CSKYABIV2__)
	subi	sp, 8			/* args 5/6 on the stack (ABIv2) */
	stw	r5, (sp, 0x4)
	stw	r4, (sp, 0x0)
#else
	ldw	r6, (sp, LSAVE_A4)	/* args 5/6 in r6/r7 (ABIv1) */
	ldw	r7, (sp, LSAVE_A5)
#endif
	jsr	r11			/* Do system call */
#if defined(__CSKYABIV2__)
	addi	sp, 8
#endif
	stw	a0, (sp, LSAVE_A0)	/* Save return value */

	movi	a0, 1			/* leave system call */
	mov	a1, sp			/* right now, sp --> pt_regs */
	jbsr	syscall_trace
	br	ret_from_exception
/*
 * First schedule of a kernel thread: finish the context switch, then
 * call fn(arg).  r8/r9 are presumably loaded by copy_thread() --
 * TODO(review): confirm against arch/csky process setup.
 */
ENTRY(ret_from_kernel_thread)
	jbsr	schedule_tail
	mov	a0, r8			/* a0 = thread argument */
	jsr	r9			/* call the thread function */
	jbsr	ret_from_exception

/*
 * First return to user space of a forked task.  Mirrors the tail of
 * csky_systemcall: report to the tracer if TIF_SYSCALL_TRACE is set.
 */
ENTRY(ret_from_fork)
	jbsr	schedule_tail
	mov	r9, sp			/* r9 = current thread_info */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10
	ldw	r8, (r9, TINFO_FLAGS)
	movi	r11_sig, 1		/* r11 = 1: we are in a syscall return */
	btsti	r8, TIF_SYSCALL_TRACE
	bf	3f
	movi	a0, 1			/* leave system call */
	mov	a1, sp			/* sp = pt_regs pointer */
	jbsr	syscall_trace
3:
	jbsr	ret_from_exception

/*
 * Common exception return.  If bit 31 of the saved PSR is set we are
 * returning within the kernel -- skip the user work-pending checks.
 * (Bit 31 is presumably the supervisor-mode bit -- TODO confirm.)
 */
ret_from_exception:
	ld	syscallid, (sp, LSAVE_PSR)
	btsti	syscallid, 31
	bt	1f			/* kernel return: restore immediately */

	/*
	 * Load address of current->thread_info, then get the address of
	 * task_struct.  Get task_needresched in task_struct.
	 */
	mov	r9, sp
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* r9 = thread_info */

resume_userspace:
	ldw	r8, (r9, TINFO_FLAGS)
	andi	r8, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
	cmpnei	r8, 0
	bt	exit_work		/* something pending: handle it first */
1:	RESTORE_ALL			/* pops pt_regs and rte's to the caller */

exit_work:
	mov	a0, sp			/* Stack address is arg[0] */
	jbsr	set_esp0		/* Call C level */
	btsti	r8, TIF_NEED_RESCHED
	bt	work_resched
	/* If thread_info->flag is empty, RESTORE_ALL */
	cmpnei	r8, 0
	bf	1b
	mov	a1, sp
	mov	a0, r8			/* a0 = pending TIF_* flags */
	mov	a2, r11_sig		/* syscall? */
	btsti	r8, TIF_SIGPENDING	/* delivering a signal? */
	/* prevent further restarts (set r11 = 0) if T was set above */
	clrt	r11_sig
	jbsr	do_notify_resume	/* do signals */
	br	resume_userspace	/* re-check flags before leaving */

work_resched:
	/* Tail-call schedule() with ret_from_exception as return address */
	lrw	syscallid, ret_from_exception
	mov	r15, syscallid		/* Return address in link */
	jmpi	schedule

/* rt_sigreturn must never trigger a syscall restart: clear r11 first */
ENTRY(sys_rt_sigreturn)
	movi	r11_sig, 0
	jmpi	do_rt_sigreturn

/* Generic trap entry: save state and hand off to the C handler */
ENTRY(csky_trap)
	SAVE_ALL EPC_KEEP
	psrset	ee			/* exceptions on; interrupts stay off */
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */
	mov	a0, sp			/* Push Stack pointer arg */
	jbsr	trap_c			/* Call C-level trap handler */
	jmpi	ret_from_exception

/*
 * Fast TLS read, entered via "trap 3" from user space.
 * Prototype from libc for abiv1:
 * register unsigned int __result asm("a0");
 * asm( "trap 3" :"=r"(__result)::);
 * Returns thread_info->tp_value in a0 without building a pt_regs frame.
 */
ENTRY(csky_get_tls)
	USPTOKSP			/* switch to the kernel stack pointer */

	/* increase epc to continue after the trap instruction */
	mfcr	a0, epc
	INCTRAP	a0
	mtcr	a0, epc

	/* get current task's thread_info from the 8K kernel stack; the
	 * subi/addi pair keeps an exactly-aligned sp in the right area */
	bmaski	a0, THREAD_SHIFT
	not	a0
	subi	sp, 1
	and	a0, sp
	addi	sp, 1

	/* get tls */
	ldw	a0, (a0, TINFO_TP_VALUE)

	KSPTOUSP			/* back to the user stack pointer */
	rte

/*
 * IRQ entry.  With CONFIG_PREEMPT the preempt count is bumped by hand
 * around csky_do_IRQ(), and preempt_schedule_irq() is looped until
 * TIF_NEED_RESCHED is clear.
 */
ENTRY(csky_irq)
	SAVE_ALL EPC_KEEP
	psrset	ee
	movi	r11_sig, 0		/* r11 = 0, Not a syscall. */

#ifdef CONFIG_PREEMPT
	mov	r9, sp			/* Get current stack pointer */
	bmaski	r10, THREAD_SHIFT
	andn	r9, r10			/* Get thread_info */

	/*
	 * Increase the preempt count of current task by 1 to disable
	 * preemption while the IRQ handler runs.
	 */
	ldw	r8, (r9, TINFO_PREEMPT)
	addi	r8, 1
	stw	r8, (r9, TINFO_PREEMPT)
#endif

	mov	a0, sp			/* arg0 = pt_regs */
	jbsr	csky_do_IRQ

#ifdef CONFIG_PREEMPT
	subi	r8, 1			/* drop the preempt count again */
	stw	r8, (r9, TINFO_PREEMPT)
	cmpnei	r8, 0
	bt	2f			/* still non-preemptible: just return */
	ldw	r8, (r9, TINFO_FLAGS)
	btsti	r8, TIF_NEED_RESCHED
	bf	2f
1:
	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
	ldw	r7, (r9, TINFO_FLAGS)	/* get new task's TI_FLAGS */
	btsti	r7, TIF_NEED_RESCHED
	bt	1b			/* go again */
#endif
2:
	jmpi	ret_from_exception

/*
 * Context switch.
 * a0 = prev task_struct *
 * a1 = next task_struct *
 * a0 = return next
 * Saves prev's PSR, callee state (SAVE_SWITCH_STACK) and kernel sp into
 * prev->thread, then loads the same from next->thread and returns on
 * next's stack.
 */
ENTRY(__switch_to)
	lrw	a3, TASK_THREAD
	addu	a3, a0			/* a3 = &prev->thread */

	mfcr	a2, psr			/* Save PSR value */
	stw	a2, (a3, THREAD_SR)	/* Save PSR in task struct */
	bclri	a2, 6			/* Disable interrupts */
	mtcr	a2, psr

	SAVE_SWITCH_STACK		/* push callee-saved regs */

	stw	sp, (a3, THREAD_KSP)	/* remember prev's kernel sp */

	/* Set up next process to run */
	lrw	a3, TASK_THREAD
	addu	a3, a1			/* a3 = &next->thread */

	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */

	ldw	a2, (a3, THREAD_SR)	/* Set next PSR */
	mtcr	a2, psr

#if defined(__CSKYABIV2__)
	/* ABIv2 keeps TLS in a register: reload it for next */
	addi	r7, a1, TASK_THREAD_INFO
	ldw	tls, (r7, TINFO_TP_VALUE)
#endif

	RESTORE_SWITCH_STACK		/* pop next's callee-saved regs */

	rts				/* return as next (a0 still = arg) */
ENDPROC(__switch_to)