xref: /linux/arch/csky/kernel/entry.S (revision 71e193d7cbcfc988c3802e15261bd807171adb6b)
1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#include <linux/linkage.h>
5#include <abi/entry.h>
6#include <abi/pgtable-bits.h>
7#include <asm/errno.h>
8#include <asm/setup.h>
9#include <asm/unistd.h>
10#include <asm/asm-offsets.h>
11#include <linux/threads.h>
12#include <asm/setup.h>
13#include <asm/page.h>
14#include <asm/thread_info.h>
15
16#define PTE_INDX_MSK    0xffc
17#define PTE_INDX_SHIFT  10
18#define _PGDIR_SHIFT    22
19
/*
 * zero_fp: clear r8 on kernel entry.
 * r8 appears to serve as the frame pointer consumed by the stack
 * tracer (TODO confirm against abi/entry.h); zeroing it terminates
 * the frame chain so the unwinder stops at the kernel entry point.
 * Compiled out unless CONFIG_STACKTRACE is enabled.
 */
20.macro	zero_fp
21#ifdef CONFIG_STACKTRACE
22	movi	r8, 0
23#endif
24.endm
25
/*
 * tlbop_begin: generate the fast-path TLB fault entry csky_\name.
 *
 * Walks the two-level page table in software for the faulting address.
 * If the pte already has _PAGE_PRESENT and \val0 set, this is a
 * "first touch" of the page: set VALID/ACCESSED plus \val1/\val2 in
 * the pte and return straight back with rte.  Otherwise jump to the
 * local label \name, which restores registers and builds a full
 * pt_regs frame; the matching tlbop_end continues from there into the
 * C page-fault handler.
 *
 * Fast path uses only a3/r6/a2, preserved in supervisor scratch
 * control registers ss2-ss4, so no stack is needed.
 */
26.macro tlbop_begin name, val0, val1, val2
27ENTRY(csky_\name)
28	mtcr    a3, ss2			/* stash a3/r6/a2 in scratch cregs */
29	mtcr    r6, ss3
30	mtcr    a2, ss4
31
32	RD_PGDR	r6			/* r6 = page-global-dir base reg */
33	RD_MEH	a3			/* a3 = MEH: faulting virtual address */
34#ifdef CONFIG_CPU_HAS_TLBI
	/* Drop any stale TLB entry for this VA before updating the pte. */
35	tlbi.vaas a3
36	sync.is
37
	/* Bit 31 set => kernel-space address: walk the kernel pgd. */
38	btsti	a3, 31
39	bf	1f
40	RD_PGDR_K r6
411:
42#else
	/* No TLBI instruction: invalidate through MCIR writes.  Bits 31
	 * and 25 select the invalidate operations -- TODO confirm exact
	 * encoding against the C-SKY MMU (MCIR) specification. */
43	bgeni	a2, 31
44	WR_MCIR	a2
45	bgeni	a2, 25
46	WR_MCIR	a2
47#endif
	/* The pgd pointer is physical with bit 0 used as a flag: clear
	 * bit 0, subtract va_pa_offset and set bit 31 to form the kernel
	 * virtual address of the pgd. */
48	bclri   r6, 0
49	lrw	a2, va_pa_offset
50	ld.w	a2, (a2, 0)
51	subu	r6, a2
52	bseti	r6, 31
53
	/* r6 = &pgd[va >> _PGDIR_SHIFT]; load the pte-table pointer. */
54	mov     a2, a3
55	lsri    a2, _PGDIR_SHIFT
56	lsli    a2, 2
57	addu    r6, a2
58	ldw     r6, (r6)
59
	/* Convert the pte-table pointer phys -> kernel virtual, as above. */
60	lrw	a2, va_pa_offset
61	ld.w	a2, (a2, 0)
62	subu	r6, a2
63	bseti	r6, 31
64
	/* r6 = &pte for va: byte offset = (va >> PTE_INDX_SHIFT) masked
	 * to a word-aligned index (PTE_INDX_MSK = 0xffc).  a3 = pte. */
65	lsri    a3, PTE_INDX_SHIFT
66	lrw     a2, PTE_INDX_MSK
67	and     a3, a2
68	addu    r6, a3
69	ldw     a3, (r6)
70
	/* Both _PAGE_PRESENT and \val0 (READ/WRITE permission) set?
	 * If not, this is a genuine fault: take the slow path. */
71	movi	a2, (_PAGE_PRESENT | \val0)
72	and     a3, a2
73	cmpne   a3, a2
74	bt	\name
75
76	/* First read/write of a present page: just set the status flags
	 * (VALID + ACCESSED, plus \val1/\val2, e.g. DIRTY/MODIFIED on
	 * write faults) in the pte and store it back. */
77	ldw     a3, (r6)
78	bgeni   a2, PAGE_VALID_BIT
79	bseti   a2, PAGE_ACCESSED_BIT
80	bseti   a2, \val1
81	bseti   a2, \val2
82	or      a3, a2
83	stw     a3, (r6)
84
85	/* Some CPUs' hardware TLB refill bypasses the D-cache: issue a
	 * cache operation (cr22 = line address, cr17 = op 0x62) so the
	 * refill sees the updated pte -- TODO confirm the 0x62 encoding
	 * against the C-SKY cache-instruction (CFR) spec. */
86#ifdef CONFIG_CPU_NEED_TLBSYNC
87	movi	a2, 0x22
88	bseti	a2, 6			/* a2 = 0x62 */
89	mtcr	r6, cr22
90	mtcr	a2, cr17
91	sync
92#endif
93
	/* Fast path done: restore clobbered regs and return from trap. */
94	mfcr    a3, ss2
95	mfcr    r6, ss3
96	mfcr    a2, ss4
97	rte
98\name:
	/* Slow path: restore regs, then save the full register frame.
	 * Execution continues in the following tlbop_end expansion. */
99	mfcr    a3, ss2
100	mfcr    r6, ss3
101	mfcr    a2, ss4
102	SAVE_ALL 0
103.endm
/*
 * tlbop_end: slow-path tail shared by all tlbop_begin expansions.
 * Runs after SAVE_ALL with sp -> pt_regs.  Calls
 * do_page_fault(regs, \is_write, mmu_meh) and leaves via the common
 * exception-return path.  RD_MEH loads the faulting address into a2;
 * presumably it is the third C argument -- verify against the
 * do_page_fault() prototype in arch/csky/mm/fault.c.
 */
104.macro tlbop_end is_write
105	zero_fp
106	RD_MEH	a2			/* a2 = faulting address (MEH) */
107	psrset  ee, ie			/* re-enable exceptions + interrupts */
108	mov     a0, sp			/* a0 = pt_regs pointer */
109	movi    a1, \is_write		/* a1 = 1 for write fault, 0 for read */
110	jbsr    do_page_fault
111	jmpi    ret_from_exception
112.endm
113
114.text
115
/* TLB-invalid-on-load: fault on read; require _PAGE_READ, then mark
 * the pte VALID + ACCESSED. */
116tlbop_begin tlbinvalidl, _PAGE_READ, PAGE_VALID_BIT, PAGE_ACCESSED_BIT
117tlbop_end 0
118
/* TLB-invalid-on-store: fault on write; require _PAGE_WRITE, then mark
 * the pte DIRTY + MODIFIED. */
119tlbop_begin tlbinvalids, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
120tlbop_end 1
121
/* TLB-modified: write hit a clean page; same flag handling as the
 * store-fault case. */
122tlbop_begin tlbmodified, _PAGE_WRITE, PAGE_DIRTY_BIT, PAGE_MODIFIED_BIT
123#ifndef CONFIG_CPU_HAS_LDSTEX
/* Cores without ld/st-exclusive emulate cmpxchg; a write fault may
 * interrupt that sequence, so let csky_cmpxchg_fixup repair it --
 * NOTE(review): semantics inferred from the name, confirm in
 * arch/csky/kernel/atomic.S. */
124jbsr csky_cmpxchg_fixup
125#endif
126tlbop_end 1
127
/*
 * csky_systemcall: trap 0 system-call entry.
 * Saves a pt_regs frame (epc advanced by TRAP0_SIZE so rte resumes
 * after the trap insn), bounds-checks the syscall number, dispatches
 * through sys_call_table, stores the return value into the saved a0,
 * and exits through ret_from_exception.  Threads with syscall-work
 * flags (ptrace/audit/...) divert to csky_syscall_trace.
 */
128ENTRY(csky_systemcall)
129	SAVE_ALL TRAP0_SIZE
130	zero_fp
131	psrset  ee, ie			/* re-enable exceptions + interrupts */
132
133	lrw     r9, __NR_syscalls
134	cmphs   syscallid, r9		/* Check nr of syscall */
	/* NOTE(review): out-of-range numbers branch to 1f without storing
	 * -ENOSYS in the saved a0 -- verify this is the intended return. */
135	bt      1f
136
	/* syscallid = sys_call_table[nr] (ixw: r9 += syscallid * 4). */
137	lrw     r9, sys_call_table
138	ixw     r9, syscallid
139	ldw     syscallid, (r9)
140	cmpnei  syscallid, 0
141	bf      ret_from_exception	/* NULL table entry: nothing to call */
142
	/* thread_info = sp & ~(THREAD_SIZE - 1); check work flags. */
143	mov     r9, sp
144	bmaski  r10, THREAD_SHIFT
145	andn    r9, r10
146	ldw     r10, (r9, TINFO_FLAGS)
147	lrw	r9, _TIF_SYSCALL_WORK
148	and	r10, r9
149	cmpnei	r10, 0
150	bt      csky_syscall_trace	/* tracing/audit work pending */
151#if defined(__CSKYABIV2__)
	/* ABIv2 passes C args 5/6 on the stack: spill r4/r5 there. */
152	subi    sp, 8
153	stw  	r5, (sp, 0x4)
154	stw  	r4, (sp, 0x0)
155	jsr     syscallid                      /* Do system call */
156	addi 	sp, 8
157#else
158	jsr     syscallid
159#endif
160	stw     a0, (sp, LSAVE_A0)      /* Save return value */
1611:
162#ifdef CONFIG_DEBUG_RSEQ
	/* Sanity-check rseq state on syscall exit (debug builds only). */
163	mov	a0, sp
164	jbsr	rseq_syscall
165#endif
166	jmpi    ret_from_exception
167
/*
 * csky_syscall_trace: traced system-call path.
 * Calls syscall_trace_enter(regs); a nonzero return means the tracer
 * rejected the syscall, so it is skipped.  Arguments are reloaded
 * from the saved frame because the tracer may have rewritten them.
 * Always reports exit via syscall_trace_exit(regs).
 */
168csky_syscall_trace:
169	mov	a0, sp                  /* sp = pt_regs pointer */
170	jbsr	syscall_trace_enter
171	cmpnei	a0, 0
172	bt	1f			/* nonzero: tracer vetoed the call */
173	/* Prepare args before do system call */
174	ldw	a0, (sp, LSAVE_A0)
175	ldw	a1, (sp, LSAVE_A1)
176	ldw	a2, (sp, LSAVE_A2)
177	ldw	a3, (sp, LSAVE_A3)
178#if defined(__CSKYABIV2__)
	/* Copy saved args 5/6 to the stack slots ABIv2 C calls expect.
	 * NOTE(review): after `subi sp, 8` the LSAVE_A4/A5 offsets are
	 * taken from the moved sp, i.e. 8 bytes below the pt_regs frame
	 * -- looks off by 8; verify against the SAVE_ALL frame layout. */
179	subi	sp, 8
180	ldw	r9, (sp, LSAVE_A4)
181	stw	r9, (sp, 0x0)
182	ldw	r9, (sp, LSAVE_A5)
183	stw	r9, (sp, 0x4)
184	jsr	syscallid                     /* Do system call */
185	addi	sp, 8
186#else
	/* ABIv1: args 5/6 go in r6/r7. */
187	ldw	r6, (sp, LSAVE_A4)
188	ldw	r7, (sp, LSAVE_A5)
189	jsr	syscallid                     /* Do system call */
190#endif
191	stw	a0, (sp, LSAVE_A0)	/* Save return value */
192
1931:
194#ifdef CONFIG_DEBUG_RSEQ
	/* Sanity-check rseq state on syscall exit (debug builds only). */
195	mov	a0, sp
196	jbsr	rseq_syscall
197#endif
198	mov     a0, sp                  /* right now, sp --> pt_regs */
199	jbsr    syscall_trace_exit
200	br	ret_from_exception
201
/*
 * ret_from_kernel_thread: first return of a new kernel thread.
 * r9/r10 are presumably loaded by copy_thread() with the thread
 * function and its argument -- confirm in arch/csky/kernel/process.c.
 * If the function returns, exit through ret_from_exception.
 */
202ENTRY(ret_from_kernel_thread)
203	jbsr	schedule_tail		/* finish the context switch */
204	mov	a0, r10			/* a0 = thread function argument */
205	jsr	r9			/* call the thread function */
206	jbsr	ret_from_exception
207
/*
 * ret_from_fork: first userspace return of a forked child.
 * After schedule_tail, report syscall exit to the tracer if any
 * syscall-work flag is set, then fall through to ret_from_exception.
 */
208ENTRY(ret_from_fork)
209	jbsr	schedule_tail
	/* thread_info = sp & ~(THREAD_SIZE - 1); check work flags. */
210	mov	r9, sp
211	bmaski	r10, THREAD_SHIFT
212	andn	r9, r10
213	ldw	r10, (r9, TINFO_FLAGS)
214	lrw	r9, _TIF_SYSCALL_WORK
215	and	r10, r9
216	cmpnei	r10, 0
217	bf	ret_from_exception	/* no trace work: plain exit */
218	mov	a0, sp			/* sp = pt_regs pointer */
219	jbsr	syscall_trace_exit
	/* falls through to ret_from_exception */
220
/*
 * ret_from_exception: common exit path for traps, syscalls and IRQs.
 * With interrupts off, handle pending work (resched/signals) only
 * when returning to user mode; optionally run the preemption and
 * irq-tracing hooks, then restore the frame and rte.
 */
221ret_from_exception:
222	psrclr	ie			/* interrupts off while checking flags */
223	ld	r9, (sp, LSAVE_PSR)
	/* Saved-PSR bit 31 set => returning to kernel mode (supervisor
	 * bit -- TODO confirm bit meaning in the C-SKY PSR spec); skip
	 * the user-mode work check. */
224	btsti	r9, 31
225
226	bt	1f
227	/*
228	 * Returning to user: locate thread_info from the kernel stack
229	 * and test its flags against _TIF_WORK_MASK (need_resched,
230	 * pending signals, ...).
	 */
231	mov	r9, sp
232	bmaski	r10, THREAD_SHIFT
233	andn	r9, r10
234
235	ldw	r10, (r9, TINFO_FLAGS)
236	lrw	r9, _TIF_WORK_MASK
237	and	r10, r9
238	cmpnei	r10, 0
239	bt	exit_work
2401:
241#ifdef CONFIG_PREEMPTION
	/* Kernel preemption: if preempt_count is zero, give the
	 * scheduler a chance before returning. */
242	mov	r9, sp
243	bmaski	r10, THREAD_SHIFT
244	andn	r9, r10
245
246	ldw	r10, (r9, TINFO_PREEMPT)
247	cmpnei	r10, 0
248	bt	2f			/* preemption disabled: skip */
249	jbsr	preempt_schedule_irq	/* irq en/disable is done inside */
2502:
251#endif
252
253#ifdef CONFIG_TRACE_IRQFLAGS
	/* If the frame we return to had interrupts enabled (PSR bit 6 =
	 * IE -- TODO confirm), tell lockdep before restoring it. */
254	ld	r10, (sp, LSAVE_PSR)
255	btsti	r10, 6
256	bf	2f
257	jbsr	trace_hardirqs_on
2582:
259#endif
260	RESTORE_ALL
261
/*
 * exit_work: user-mode return work (r10 = pending TIF flags).
 * lr is pointed back at ret_from_exception so whichever handler we
 * tail-jump to re-runs the full exit check when it returns.
 */
262exit_work:
263	lrw	r9, ret_from_exception
264	mov	lr, r9			/* callee "returns" to ret_from_exception */
265
266	btsti	r10, TIF_NEED_RESCHED
267	bt	work_resched
268
	/* Other work (signals, notify-resume): interrupts back on, then
	 * do_notify_resume(regs, thread_info_flags). */
269	psrset	ie
270	mov	a0, sp
271	mov	a1, r10
272	jmpi	do_notify_resume
273
274work_resched:
275	jmpi	schedule		/* returns via lr to ret_from_exception */
276
/*
 * csky_trap: generic exception entry.
 * Saves the full register frame and hands it to the C-level trap_c()
 * dispatcher; exits through the common return path.
 */
277ENTRY(csky_trap)
278	SAVE_ALL 0
279	zero_fp
280	psrset	ee			/* re-enable exceptions (not IRQs) */
281	mov	a0, sp                 /* Push Stack pointer arg */
282	jbsr	trap_c                 /* Call C-level trap handler */
283	jmpi	ret_from_exception
284
285/*
286 * csky_get_tls: fast "trap 3" service returning the thread's TLS
287 * pointer in a0 (ABIv1 has no TLS register).  Prototype from libc:
288 *	register unsigned int __result asm("a0");
289 *	asm( "trap 3" :"=r"(__result)::);
 */
290ENTRY(csky_get_tls)
291	USPTOKSP			/* switch from user to kernel sp */
292
293	/* Advance epc past the trap insn so rte resumes after it. */
294	mfcr	a0, epc
295	addi	a0, TRAP0_SIZE
296	mtcr	a0, epc
297
298	/* thread_info = kernel sp rounded down to the stack base.
	 * The subi/addi-by-1 dance presumably keeps an sp sitting exactly
	 * on the stack's upper boundary inside the right thread_info --
	 * TODO confirm. */
299	bmaski	a0, THREAD_SHIFT
300	not	a0
301	subi	sp, 1
302	and	a0, sp
303	addi	sp, 1
304
305	/* a0 = thread_info->tp_value (the saved TLS pointer). */
306	ldw	a0, (a0, TINFO_TP_VALUE)
307
308	KSPTOUSP			/* back to the user sp */
309	rte
310
/*
 * csky_irq: hardware interrupt entry.
 * Saves the register frame, lets lockdep know IRQs are off, then
 * dispatches to the C handler csky_do_IRQ(regs) and exits through
 * the common return path.
 */
311ENTRY(csky_irq)
312	SAVE_ALL 0
313	zero_fp
314	psrset	ee			/* exceptions on; IRQs stay masked */
315
316#ifdef CONFIG_TRACE_IRQFLAGS
317	jbsr	trace_hardirqs_off
318#endif
319
320
321	mov	a0, sp			/* a0 = pt_regs pointer */
322	jbsr	csky_do_IRQ
323
324	jmpi	ret_from_exception
325
326/*
327 * a0 =  prev task_struct *
328 * a1 =  next task_struct *
329 * a0 =  return value: a0 is not visibly written here, so prev is
 *       returned -- the original comment said "return next"; verify
 *       against SAVE/RESTORE_SWITCH_STACK in abi/entry.h.
330 */
331ENTRY(__switch_to)
	/* a3 = &prev->thread */
332	lrw	a3, TASK_THREAD
333	addu	a3, a0
334
335	SAVE_SWITCH_STACK		/* push callee-saved context */
336
337	stw	sp, (a3, THREAD_KSP)	/* prev->thread.ksp = sp */
338
	/* Set up next process to run: a3 = &next->thread */
339	lrw	a3, TASK_THREAD
340	addu	a3, a1
341
342	ldw	sp, (a3, THREAD_KSP)	/* Set next kernel sp */
343
344#if  defined(__CSKYABIV2__)
	/* ABIv2 keeps TLS in a register: load next's tp_value. */
345	addi	a3, a1, TASK_THREAD_INFO
346	ldw	tls, (a3, TINFO_TP_VALUE)
347#endif
348
349	RESTORE_SWITCH_STACK		/* pop next's callee-saved context */
350
351	rts
352ENDPROC(__switch_to)
354