/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>
#include <asm/smp_plat.h>

/*
 * For v7 SMP cores running a preemptible kernel we may be pre-empted
 * during a TLB maintenance operation, so execute an inner-shareable dsb
 * to ensure that the maintenance completes in case we migrate to another
 * CPU.
 */
#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi() dsb(ish)
#else
#define __complete_pending_tlbi()
#endif

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last) \
do { \
	__complete_pending_tlbi(); \
	/* Keep the per-CPU __entry_task pointer in sync with the incoming task */ \
	if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || is_smp()) \
		__this_cpu_write(__entry_task, next); \
	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next)); \
} while (0)

#endif /* __ASM_ARM_SWITCH_TO_H */
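
/*
 * Illustrative sketch only (kept out of the build with #if 0): roughly how a
 * scheduler-style caller is expected to use switch_to().  The function below
 * is hypothetical; in the kernel the actual caller is context_switch() in
 * kernel/sched/core.c.
 */
#if 0
static struct task_struct *example_context_switch(struct task_struct *prev,
						  struct task_struct *next)
{
	struct task_struct *last;

	/*
	 * switch_to() stops running 'prev' and resumes 'next'.  When 'prev'
	 * is eventually scheduled back in, execution continues here and
	 * 'last' identifies the task that ran immediately before 'prev'.
	 */
	switch_to(prev, next, last);

	return last;
}
#endif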