/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARC_JUMP_LABEL_H
#define _ASM_ARC_JUMP_LABEL_H

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/types.h>

#define JUMP_LABEL_NOP_SIZE 4

/*
 * NOTE about '.balign 4':
 *
 * To allow the patched instruction to be updated atomically, we must
 * guarantee that it does not cross an L1 cache line boundary.
 *
 * Today we simply align the patchable instruction to 4 bytes with the
 * ".balign 4" directive; the assembler then inserts one 16-bit NOP_S in
 * front of it when required.
 *
 * However, 4-byte alignment is stricter than actually required: it is
 * enough that the 32-bit instruction does not cross an L1 cache line /
 * L1 I$ fetch block boundary, which could be achieved with the
 * ".bundle_align_mode" assembler directive. That would spare us the
 * useless NOP_S padding in most cases.
 *
 * TODO: switch to the ".bundle_align_mode" directive once it is
 * supported by the ARC toolchain.
 */

static __always_inline bool arch_static_branch(struct static_key *key,
					       bool branch)
{
	/*
	 * Emit a NOP at the patch site and record (site, target, key)
	 * in the __jump_table section for runtime patching.
	 */
	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
		 "1:						\n"
		 "nop						\n"
		 ".pushsection __jump_table, \"aw\"		\n"
		 ".word 1b, %l[l_yes], %c0			\n"
		 ".popsection					\n"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key *key,
						    bool branch)
{
	/* Same as above, but the patch site starts out as a branch. */
	asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)"	\n"
		 "1:						\n"
		 "b %l[l_yes]					\n"
		 ".pushsection __jump_table, \"aw\"		\n"
		 ".word 1b, %l[l_yes], %c0			\n"
		 ".popsection					\n"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

typedef u32 jump_label_t;

/* Layout of one __jump_table entry emitted above (three 32-bit words). */
struct jump_entry {
	jump_label_t code;	/* address of the patch site */
	jump_label_t target;	/* branch destination (l_yes) */
	jump_label_t key;	/* associated struct static_key */
};

#endif /* __ASSEMBLY__ */
#endif
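
/*
 * A minimal usage sketch of the hooks above. The key and function names
 * here are hypothetical, not part of this header; the hooks themselves are
 * never called directly, but are expanded by the generic code in
 * <linux/jump_label.h> through the static_branch_*() wrappers:
 *
 *	DEFINE_STATIC_KEY_FALSE(example_key);
 *
 *	int example_fast_path(void)
 *	{
 *		// With the key disabled this condition is the single NOP
 *		// emitted by arch_static_branch(); enabling the key
 *		// live-patches the NOP into a 'b' to the unlikely block.
 *		if (static_branch_unlikely(&example_key))
 *			return 1;
 *		return 0;
 *	}
 *
 *	// Runtime toggle: rewrites every site recorded in __jump_table.
 *	static_branch_enable(&example_key);
 */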