/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <asm/current.h>
#include <linux/thread_info.h>
#include <asm/atomic_ops.h>
#include <asm/march.h>

#ifdef MARCH_HAS_Z196_FEATURES

/*
 * We use the MSB mostly because it's available. The bit is kept as an
 * inverted NEED_RESCHED flag: the bit being clear means a reschedule is
 * needed, so a raw preempt_count of zero encodes "count dropped to zero
 * and reschedule needed" in a single value.
 */
#define PREEMPT_NEED_RESCHED	0x80000000
#define PREEMPT_ENABLED		(0 + PREEMPT_NEED_RESCHED)

static __always_inline int preempt_count(void)
{
	return READ_ONCE(get_lowcore()->preempt_count) & ~PREEMPT_NEED_RESCHED;
}

static __always_inline void preempt_count_set(int pc)
{
	int old, new;

	do {
		old = READ_ONCE(get_lowcore()->preempt_count);
		new = (old & PREEMPT_NEED_RESCHED) |
			(pc & ~PREEMPT_NEED_RESCHED);
	} while (__atomic_cmpxchg(&get_lowcore()->preempt_count,
				  old, new) != old);
}

static __always_inline void set_preempt_need_resched(void)
{
	__atomic_and(~PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}

static __always_inline void clear_preempt_need_resched(void)
{
	__atomic_or(PREEMPT_NEED_RESCHED, &get_lowcore()->preempt_count);
}

static __always_inline bool test_preempt_need_resched(void)
{
	return !(READ_ONCE(get_lowcore()->preempt_count) & PREEMPT_NEED_RESCHED);
}

static __always_inline void __preempt_count_add(int val)
{
	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
	 * enabled, gcc 12 fails to handle __builtin_constant_p().
	 */
	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
		if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
			__atomic_add_const(val, &get_lowcore()->preempt_count);
			return;
		}
	}
	__atomic_add(val, &get_lowcore()->preempt_count);
}

static __always_inline void __preempt_count_sub(int val)
{
	__preempt_count_add(-val);
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * __atomic_add() returns the old value; an old value of 1 means the
	 * count just dropped to zero with the inverted NEED_RESCHED bit
	 * clear, i.e. preemption is possible and a reschedule is needed.
	 */
	return __atomic_add(-1, &get_lowcore()->preempt_count) == 1;
}

static __always_inline bool should_resched(int preempt_offset)
{
	/* Equal only if the inverted NEED_RESCHED bit is clear. */
	return unlikely(READ_ONCE(get_lowcore()->preempt_count) ==
			preempt_offset);
}

#else /* MARCH_HAS_Z196_FEATURES */

#define PREEMPT_ENABLED		(0)

static __always_inline int preempt_count(void)
{
	return READ_ONCE(get_lowcore()->preempt_count);
}

static __always_inline void preempt_count_set(int pc)
{
	get_lowcore()->preempt_count = pc;
}

static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

static __always_inline void __preempt_count_add(int val)
{
	get_lowcore()->preempt_count += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	get_lowcore()->preempt_count -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	return !--get_lowcore()->preempt_count && tif_need_resched();
}

static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

#endif /* MARCH_HAS_Z196_FEATURES */

#define init_task_preempt_count(p)	do { } while (0)
/* Deferred to CPU bringup time */
#define init_idle_preempt_count(p, cpu)	do { } while (0)

#ifdef CONFIG_PREEMPTION
extern void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */