entry-header.S: diff between commits 87a0b2fafc09766d8c55461a18345a1cfb10a7fe
(old) and 75fa4adc4f50ee52d8cdfa3e84798176ccb4a354 (new); changed lines are
marked -/+ below, unmarked lines are unchanged context.
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>

--- 278 unchanged lines hidden ---

	add	r8, r0, #S_PC
	ldmia	r0, {r0 - r1}
	rfeia	r8
#endif
	.endm


	.macro	restore_user_regs, fast = 0, offset = 0
-#if defined(CONFIG_CPU_32v6K) && !defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_32v6K) && \
+    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L1_\@)
+#endif
	@ The TLS register update is deferred until return to user space so we
	@ can use it for other things while running in the kernel
-	get_thread_info r1
+	mrc	p15, 0, r1, c13, c0, 3		@ get current_thread_info pointer
	ldr	r1, [r1, #TI_TP_VALUE]
	mcr	p15, 0, r1, c13, c0, 3		@ set TLS register
+.L1_\@:
#endif

	uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
	@ ARM mode restore
	mov	r2, sp
	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
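
A note on the hunk above: per the comments, the hardware TLS register
(CP15 c13/c0/3) doubles as the current thread_info pointer while running in
the kernel, so the task's saved user-space TLS value is only loaded back
here, on the way out to user space. The new ALT_SMP()/ALT_UP_B(.L1_\@) pair
extends this to CONFIG_SMP kernels that may also boot on uniprocessor V6
hardware without the thread ID registers: the SMP-on-UP patching machinery
leaves the nop in place on SMP systems and patches in a branch to .L1_\@,
skipping the reload, on UP systems. A minimal C sketch of the reload itself;
tls_reg and the struct layout are illustrative stand-ins, not the kernel's
definitions:

	#include <stdint.h>
	#include <stdio.h>

	struct thread_info {
		uintptr_t tp_value;	/* user TLS value (the TI_TP_VALUE slot) */
	};

	/* Models CP15 c13/c0/3: holds thread_info while in the kernel. */
	static uintptr_t tls_reg;

	static void restore_user_tls(void)
	{
		/* mrc p15, 0, r1, c13, c0, 3: read current thread_info */
		struct thread_info *ti = (struct thread_info *)tls_reg;

		/* ldr r1, [r1, #TI_TP_VALUE]; mcr p15, 0, r1, c13, c0, 3:
		 * hand the register back to the task's user-space TLS value */
		tls_reg = ti->tp_value;
	}

	int main(void)
	{
		struct thread_info ti = { .tp_value = 0xbeef0000 };

		tls_reg = (uintptr_t)&ti;	/* as stashed at kernel entry */
		restore_user_tls();
		printf("TLS register on return: %#lx\n",
		       (unsigned long)tls_reg);
		return 0;
	}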

--- 109 unchanged lines hidden ---

 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info
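
Why "tbl == why is intentional": judging by the comments, why != 0 marks a
Linux syscall, and tbl shares r8 with it, so loading a (non-NULL) syscall
table pointer into tbl doubles as setting that marker. A hedged C sketch of
the aliasing; the union and names are illustrative only:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Two names for one word, as tbl/why are two names for r8. */
	union r8 {
		const void *tbl;	/* syscall table pointer */
		uintptr_t why;		/* Linux syscall (!= 0)  */
	};

	int main(void)
	{
		static const void *sys_call_table[1];
		union r8 r8;

		r8.tbl = sys_call_table; /* loading the table pointer...  */
		assert(r8.why != 0);	 /* ...also marks "Linux syscall" */

		r8.why = 0;		/* not a syscall */
		/* Assumes the usual all-zero null pointer representation. */
		assert(r8.tbl == NULL);
		return 0;
	}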
+
+	.macro	do_overflow_check, frame_size:req
+#ifdef CONFIG_VMAP_STACK
+	@
+	@ Test whether the SP has overflowed. Task and IRQ stacks are aligned
+	@ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
+	@ zero.
+	@
+ARM(	tst	sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)	)
+THUMB(	tst	r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)	)
+THUMB(	it	ne						)
+	bne	.Lstack_overflow_check\@
+
+	.pushsection	.text
+.Lstack_overflow_check\@:
+	@
+	@ The stack pointer is not pointing to a valid vmap'ed stack, but it
+	@ may be pointing into the linear map instead, which may happen if we
+	@ are already running from the overflow stack. We cannot detect overflow
+	@ in such cases so just carry on.
+	@
+	str	ip, [r0, #12]			@ Stash IP on the mode stack
+	ldr_va	ip, high_memory			@ Start of VMALLOC space
+ARM(	cmp	sp, ip			)	@ SP in vmalloc space?
+THUMB(	cmp	r1, ip			)
+THUMB(	itt	lo			)
+	ldrlo	ip, [r0, #12]			@ Restore IP
+	blo	.Lout\@				@ Carry on
+
+THUMB(	sub	r1, sp, r1	)		@ Restore original R1
+THUMB(	sub	sp, r1		)		@ Restore original SP
+	add	sp, sp, #\frame_size		@ Undo svc_entry's SP change
+	b	__bad_stack			@ Handle VMAP stack overflow
+	.popsection
+.Lout\@:
+#endif
+	.endm
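
The fast path of do_overflow_check packs the whole test into a single TST:
THREAD_SIZE is 1 << (THREAD_SIZE_ORDER + PAGE_SHIFT), and per the comment
the stacks are aligned so that this bit of any valid SP is zero, so a set
bit can only mean that SP has dropped out of the stack. A small userspace C
sketch of the arithmetic; PAGE_SHIFT, THREAD_SIZE_ORDER and the base address
are assumed example values:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT        12	/* assumed: 4 KiB pages */
	#define THREAD_SIZE_ORDER 1	/* assumed: 8 KiB stacks */
	#define THREAD_SIZE       (1UL << (THREAD_SIZE_ORDER + PAGE_SHIFT))

	/* Equivalent of "tst sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)". */
	static int sp_bit_set(uintptr_t sp)
	{
		return (sp & THREAD_SIZE) != 0;
	}

	int main(void)
	{
		/* Stack occupies [base, base + THREAD_SIZE); 2 * THREAD_SIZE
		 * alignment keeps the THREAD_SIZE bit of every valid SP clear. */
		uintptr_t base = 0xff0f4000;

		assert(!sp_bit_set(base + THREAD_SIZE - 16)); /* near the top */
		assert(!sp_bit_set(base));		/* bottom of the stack */
		assert(sp_bit_set(base - 4));	/* underflow into the guard */
		return 0;
	}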
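
The .Lstack_overflow_check\@ slow path then filters the false positive that
the second comment describes: an SP below high_memory may be the linear-map
overflow stack, where the alignment rule proves nothing, so only an SP in
vmalloc space is reported to __bad_stack. A sketch under the same
assumptions; the high_memory value is made up for illustration:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT        12	/* assumed, as above */
	#define THREAD_SIZE_ORDER 1
	#define THREAD_SIZE       (1UL << (THREAD_SIZE_ORDER + PAGE_SHIFT))

	/* Stand-in for the kernel's high_memory: vmalloc space starts here. */
	static const uintptr_t high_memory = 0xf0000000;

	static int vmap_stack_overflowed(uintptr_t sp)
	{
		if ((sp & THREAD_SIZE) == 0)
			return 0;	/* fast path: SP still looks valid */
		if (sp < high_memory)
			return 0;	/* blo .Lout\@: may already be the
					 * overflow stack, so carry on */
		return 1;		/* b __bad_stack */
	}

	int main(void)
	{
		assert(vmap_stack_overflowed(0xff0f3ffc));  /* vmap guard  */
		assert(!vmap_stack_overflowed(0x8123fffc)); /* linear map  */
		return 0;
	}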