#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>

/*
 * For v7 SMP cores running a preemptible kernel we may be pre-empted
 * during a TLB maintenance operation, so execute an inner-shareable dsb
 * to ensure that the maintenance completes in case we migrate to another
 * CPU.
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define finish_arch_switch(prev)	dsb(ish)
#endif

/*
 * switch_to(prev, next) should switch from task `prev' to `next'
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next));	\
} while (0)

#endif /* __ASM_ARM_SWITCH_TO_H */
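
/*
 * Illustrative sketch (not part of the upstream header): roughly how the
 * scheduler core is expected to use switch_to().  The shape follows the
 * generic context_switch() pattern in kernel/sched/core.c, but the function
 * name example_context_switch() and the finish_task_switch() call here are
 * assumptions for illustration only, not definitions provided by this file.
 */
#if 0	/* example only, never built */
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	/*
	 * On ARM this expands to __switch_to(prev, task_thread_info(prev),
	 * task_thread_info(next)).  When this task is eventually scheduled
	 * back in, `prev' holds whichever task ran immediately before us,
	 * which may differ from the `prev' we passed in.
	 */
	switch_to(prev, next, prev);

	barrier();			/* keep GCC from caching `current' */
	finish_task_switch(prev);	/* assumed scheduler-core helper */
}
#endif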