Lines Matching +full:is +full:- +full:decoded +full:- +full:cs
1 // SPDX-License-Identifier: GPL-2.0-only
10 #include <asm/text-patching.h>
20 #define MAX_PATCH_LEN (255-1)
41 __setup("debug-alternative", debug_alt);
50 __setup("noreplace-smp", setup_noreplace_smp);
67 for (j = 0; j < (len) - 1; j++) \
127 void *tmp = krealloc(pages->pages, (pages->num+1) * sizeof(void *), in __its_alloc()
132 pages->pages = tmp; in __its_alloc()
133 pages->pages[pages->num++] = page; in __its_alloc()
163 reg -= 8; in its_init_thunk()
174 for (int i = 0; i < pages->num; i++) { in its_pages_protect()
175 void *page = pages->pages[i]; in its_pages_protect()
210 its_pages_protect(&mod->arch.its_pages); in its_fini_mod()
218 for (int i = 0; i < mod->arch.its_pages.num; i++) { in its_free_mod()
219 void *page = mod->arch.its_pages.pages[i]; in its_free_mod()
222 kfree(mod->arch.its_pages.pages); in its_free_mod()
233 pages = &its_mod->arch.its_pages; in its_alloc()
254 * its size is 3 or 4 bytes depending on the register used. If CFI in its_allocate_thunk()
255 * paranoid is used then 3 extra bytes are added in the ITS thunk to in its_allocate_thunk()
262 if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) { in its_allocate_thunk()
276 if ((its_offset + size - 1) % 64 < 32) in its_allocate_thunk()
277 its_offset = ((its_offset - 1) | 0x3F) + 33; in its_allocate_thunk()
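The bump on line 277 above is pure offset arithmetic: when the thunk's last byte would land in the lower half of a 64-byte cacheline, the offset is moved to byte 32 of the next line. A minimal stand-alone model of just that arithmetic (not the kernel allocator):

	#include <assert.h>

	static unsigned int its_place(unsigned int its_offset, unsigned int size)
	{
		/* same test and bump as its_allocate_thunk() above */
		if ((its_offset + size - 1) % 64 < 32)
			its_offset = ((its_offset - 1) | 0x3F) + 33;
		return its_offset;
	}

	int main(void)
	{
		assert(its_place(10, 7) == 96);	/* ends at 16: lower half, bumped to 64+32 */
		assert(its_place(40, 7) == 40);	/* ends at 46: upper half, left alone      */
		return 0;
	}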
292 return thunk - 2; in its_static_thunk()
308 * @buf: temporary buffer on which the patching operates. This buffer is
309 * eventually text-poked into the kernel image.
319 * for every single-byte NOP, try to generate the maximally available NOP of
320 * size <= ASM_NOP_MAX such that only a single CFI entry is generated (vs one for
321 each single-byte NOP). If @len to fill out is > ASM_NOP_MAX, pad with INT3 and
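The rule described above can be modelled in a few lines: for a gap of up to ASM_NOP_MAX bytes emit one of the standard multi-byte NOP encodings, otherwise jump over INT3 filler. A sketch under those assumptions (the byte table is the usual recommended NOP set; the oversized case is simplified to a JMP8 and assumes the gap fits a rel8):

	#include <stdint.h>
	#include <string.h>

	#define NOP_MAX 8	/* stand-in for ASM_NOP_MAX */

	static const uint8_t nops[NOP_MAX + 1][NOP_MAX] = {
		[1] = { 0x90 },
		[2] = { 0x66, 0x90 },
		[3] = { 0x0f, 0x1f, 0x00 },
		[4] = { 0x0f, 0x1f, 0x40, 0x00 },
		[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 },
		[6] = { 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 },
		[7] = { 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 },
		[8] = { 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 },
	};

	static void fill_nop(uint8_t *buf, unsigned int len)
	{
		if (len <= NOP_MAX) {
			memcpy(buf, nops[len], len);	/* one instruction, one CFI entry */
			return;
		}
		/* too long for a single NOP: jump over INT3 filler instead */
		buf[0] = 0xeb;				/* JMP rel8                        */
		buf[1] = len - 2;			/* lands right after the range     */
		memset(buf + 2, 0xcc, len - 2);		/* INT3, never executed            */
	}

	int main(void)
	{
		uint8_t buf[32];

		fill_nop(buf, 4);	/* 0f 1f 40 00        */
		fill_nop(buf, 12);	/* eb 0a cc cc ... cc */
		return 0;
	}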
354 if (insn->opcode.bytes[0] == 0x90 && in insn_is_nop()
355 (!insn->prefixes.nbytes || insn->prefixes.bytes[0] != 0xF3)) in insn_is_nop()
359 if (insn->opcode.bytes[0] == 0x0F && insn->opcode.bytes[1] == 0x1F) in insn_is_nop()
368 * Find the offset of the first non-NOP instruction starting at @offset
409 add_nop(buf + nop, next - nop); in optimize_nops()
416 * In this context, "source" is where the instructions are placed in the
419 * "Destination" is where the instructions are being patched in by this
422 * The source offset is:
424 * src_imm = target - src_next_ip (1)
426 * and the target offset is:
428 * dst_imm = target - dst_next_ip (2)
436 * dst_imm = (src_imm + src_next_ip) - dst_next_ip (3)
438 * Now, since the instruction stream is 'identical' at src and dst (it
439 * is being copied after all) it can be stated that:
447 * dst_imm = src_imm + (src + ip_offset) - (dst + ip_offset)
448 * = src_imm + src - dst + ip_offset - ip_offset
449 * = src_imm + src - dst (5)
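Equation (5) in concrete form: when a RIP-relative instruction is copied from the replacement (source) to the patch site (destination), keeping the same target only requires correcting the displacement by (src - dst), which is exactly the "+= repl - instr" adjustments further down. Stand-alone example:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t text[0x2000];
		uint8_t *target = text + 0x1000;	/* where the CALL must land      */
		uint8_t *src    = text + 0x0100;	/* replacement (source) location */
		uint8_t *dst    = text + 0x0400;	/* patch site (destination)      */
		int call_len    = 5;			/* E8 rel32                      */

		/* displacement as assembled at the source: target - src_next_ip */
		int32_t src_imm = (int32_t)(target - (src + call_len));

		/* correction applied while copying: imm += src - dst */
		int32_t dst_imm = src_imm + (int32_t)(src - dst);

		/* the copied CALL still reaches the same target from its new home */
		assert(dst + call_len + dst_imm == target);
		return 0;
	}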
458 BUG_ON((v >> 31) != (v >> (n_-1))); \
479 * If the target is inside the patched block, it's relative to the in need_reloc()
509 repl - instr); in __apply_relocation()
517 imm += repl - instr; in __apply_relocation()
518 imm += JMP32_INSN_SIZE - JMP8_INSN_SIZE; in __apply_relocation()
523 memset(&buf[i+2], INT3_INSN_OPCODE, insn.length - 2); in __apply_relocation()
533 repl - instr); in __apply_relocation()
545 /* Low-level backend functions usable from alternative code replacements. */
567 if (a->replacementlen != 5 || insn_buff[0] != CALL_INSN_OPCODE) { in alt_replace_call()
568 pr_err("ALT_FLAG_DIRECT_CALL set for a non-call replacement instruction\n"); in alt_replace_call()
572 if (a->instrlen != 6 || in alt_replace_call()
583 /* target address is stored at "next instruction + disp". */ in alt_replace_call()
584 target = *(void **)(instr + a->instrlen + disp); in alt_replace_call()
587 /* target address is stored at disp. */ in alt_replace_call()
593 /* (BUG_func - .) + (target - BUG_func) := target - . */ in alt_replace_call()
594 *(s32 *)(insn_buff + 1) += target - bug; in alt_replace_call()
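The identity quoted above, (BUG_func - .) + (target - BUG_func) := target - ., is what makes the in-place "+= target - bug" fixup valid: the replacement CALL was assembled against the BUG_func placeholder, and adding the difference re-points it at the real target without re-deriving the displacement. Worked out with made-up addresses:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		intptr_t rip    = 0x1005;	/* address right after the CALL  */
		intptr_t bug    = 0x4000;	/* placeholder the compiler used */
		intptr_t target = 0x9000;	/* function we actually want     */

		int32_t rel32 = (int32_t)(bug - rip);	/* as found in the replacement */
		rel32 += (int32_t)(target - bug);	/* the in-place fixup          */

		assert(rip + rel32 == target);
		return 0;
	}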
604 return (u8 *)&i->instr_offset + i->instr_offset; in instr_va()
609 * before SMP is initialized to avoid SMP problems with self modifying code.
624 DPRINTK(ALT, "alt table %px, -> %px", start, end); in apply_alternatives()
627 * KASAN_SHADOW_START is defined using in apply_alternatives()
628 * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here. in apply_alternatives()
630 * conversion and triggers a false-positive out-of-bound report. in apply_alternatives()
632 * Disable KASAN until the patching is complete. in apply_alternatives()
655 u8 len = max(a->instrlen, b->instrlen); in apply_alternatives()
656 a->instrlen = b->instrlen = len; in apply_alternatives()
660 replacement = (u8 *)&a->repl_offset + a->repl_offset; in apply_alternatives()
661 BUG_ON(a->instrlen > sizeof(insn_buff)); in apply_alternatives()
662 BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32); in apply_alternatives()
666 * - feature is present in apply_alternatives()
667 * - feature not present but ALT_FLAG_NOT is set to mean, in apply_alternatives()
668 * patch if feature is *NOT* present. in apply_alternatives()
670 if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) { in apply_alternatives()
671 memcpy(insn_buff, instr, a->instrlen); in apply_alternatives()
672 optimize_nops(instr, insn_buff, a->instrlen); in apply_alternatives()
673 text_poke_early(instr, insn_buff, a->instrlen); in apply_alternatives()
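The !boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT) test above selects the "keep the original, only optimize its NOPs" path; the replacement is applied iff feature-present XOR ALT_FLAG_NOT. A truth-table check of that condition (the helper name below is made up):

	#include <assert.h>
	#include <stdbool.h>

	static bool skip_replacement(bool feature_present, bool alt_flag_not)
	{
		return !feature_present == !alt_flag_not;	/* the test above */
	}

	int main(void)
	{
		assert(!skip_replacement(true,  false));	/* feature present -> patch       */
		assert( skip_replacement(false, false));	/* feature absent  -> keep orig   */
		assert( skip_replacement(true,  true));		/* NOT-variant, present -> keep   */
		assert(!skip_replacement(false, true));		/* NOT-variant, absent  -> patch  */
		return 0;
	}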
678 a->cpuid >> 5, in apply_alternatives()
679 a->cpuid & 0x1f, in apply_alternatives()
680 instr, instr, a->instrlen, in apply_alternatives()
681 replacement, a->replacementlen, a->flags); in apply_alternatives()
683 memcpy(insn_buff, replacement, a->replacementlen); in apply_alternatives()
684 insn_buff_sz = a->replacementlen; in apply_alternatives()
686 if (a->flags & ALT_FLAG_DIRECT_CALL) { in apply_alternatives()
692 for (; insn_buff_sz < a->instrlen; insn_buff_sz++) in apply_alternatives()
695 text_poke_apply_relocation(insn_buff, instr, a->instrlen, replacement, a->replacementlen); in apply_alternatives()
697 DUMP_BYTES(ALT, instr, a->instrlen, "%px: old_insn: ", instr); in apply_alternatives()
698 DUMP_BYTES(ALT, replacement, a->replacementlen, "%px: rpl_insn: ", replacement); in apply_alternatives()
709 /* Jcc.d32 second opcode byte is in the range: 0x80-0x8f */ in is_jcc32()
710 return insn->opcode.bytes[0] == 0x0f && (insn->opcode.bytes[1] & 0xf0) == 0x80; in is_jcc32()
716 * [CS]{,3} CALL/JMP *%\reg [INT3]*
720 int cs = 0, bp = 0; in emit_indirect() local
727 len -= 2 + (reg >= 8); in emit_indirect()
734 * Additional NOP is better than prefix decode penalty. in emit_indirect()
737 cs = len; in emit_indirect()
747 return -1; in emit_indirect()
750 while (cs--) in emit_indirect()
751 bytes[i++] = 0x2e; /* CS-prefix */ in emit_indirect()
755 reg -= 8; in emit_indirect()
764 while (bp--) in emit_indirect()
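The [CS]{,3} CALL/JMP *%\reg [INT3]* pattern named above can be sketched byte-for-byte: optional 0x2e prefixes, an optional REX.B, the FF /2 (call) or FF /4 (jmp) ModRM form, then INT3 fill. How many CS prefixes versus trailing INT3 bytes the kernel actually chooses is not modelled here; this only shows the encoding:

	#include <stdint.h>

	#define CALL_INSN_OPCODE  0xE8
	#define JMP32_INSN_OPCODE 0xE9
	#define INT3_INSN_OPCODE  0xCC

	static int emit_indirect_sketch(int op, int reg, uint8_t *bytes, int len, int cs)
	{
		uint8_t modrm = (op == CALL_INSN_OPCODE) ? 0x10 : 0x20;	/* /2 or /4 */
		int i = 0;

		while (cs--)
			bytes[i++] = 0x2e;		/* CS segment-override prefix */

		if (reg >= 8) {
			bytes[i++] = 0x41;		/* REX.B for r8-r15 */
			reg -= 8;
		}

		bytes[i++] = 0xff;			/* opcode group 5 */
		bytes[i++] = 0xc0 | modrm | reg;	/* mod=3, reg=/2 or /4, rm=reg */

		while (i < len)
			bytes[i++] = INT3_INSN_OPCODE;	/* pad the remaining slot */

		return i;
	}

	int main(void)
	{
		uint8_t buf[8];

		/* "cs cs call *%r11" padded to 6 bytes: 2e 2e 41 ff d3 cc */
		return emit_indirect_sketch(CALL_INSN_OPCODE, 11, buf, 6, 2) == 6 ? 0 : 1;
	}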
773 u8 op = insn->opcode.bytes[0]; in __emit_trampoline()
778 * tail-calls. Deal with them. in __emit_trampoline()
782 op = insn->opcode.bytes[1]; in __emit_trampoline()
786 if (insn->length == 6) in __emit_trampoline()
787 bytes[i++] = 0x2e; /* CS-prefix */ in __emit_trampoline()
807 return -1; in __emit_trampoline()
810 WARN_ON_ONCE(i != insn->length); in __emit_trampoline()
834 /* Check if an indirect branch is at ITS-unsafe address */
840 /* Indirect branch opcode is 2 or 3 bytes depending on reg */ in cpu_wants_indirect_its_thunk_at()
843 /* Lower-half of the cacheline? */ in cpu_wants_indirect_its_thunk_at()
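A model of the check sketched by the two comments above: the indirect branch is 2 or 3 bytes (an extra REX.B for r8-r15), and it is ITS-unsafe when its last byte sits in the lower half of a 64-byte cacheline. The exact kernel test is not reproduced; the lower-half condition below is an assumption consistent with the % 64 < 32 test used in its_allocate_thunk():

	#include <stdbool.h>
	#include <stdint.h>

	static bool last_byte_in_lower_half(uintptr_t branch_addr, int reg)
	{
		int insn_len = 2 + (reg >= 8);			/* FF /r, plus REX.B   */
		uintptr_t last = branch_addr + insn_len - 1;	/* last byte of branch */

		return (last % 64) < 32;			/* lower half of line  */
	}

	int main(void)
	{
		/* branch whose last byte sits at offset 2 of a cacheline -> lower half */
		return last_byte_in_lower_half(0x1001, 0) ? 0 : 1;
	}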
879 target = addr + insn->length + insn->immediate.value; in patch_retpoline()
880 reg = target - __x86_indirect_thunk_array; in patch_retpoline()
883 return -1; in patch_retpoline()
893 return -1; in patch_retpoline()
896 op = insn->opcode.bytes[0]; in patch_retpoline()
912 cc = insn->opcode.bytes[1] & 0xf; in patch_retpoline()
916 bytes[i++] = insn->length - 2; /* sizeof(Jcc.d8) == 2 */ in patch_retpoline()
933 * Check if the address of last byte of emitted-indirect is in in patch_retpoline()
934 * lower-half of the cacheline. Such branches need ITS mitigation. in patch_retpoline()
940 ret = emit_indirect(op, reg, bytes + i, insn->length - i); in patch_retpoline()
945 for (; i < insn->length;) in patch_retpoline()
952 * Generated by 'objtool --retpoline'.
983 if (dest[-1] == 0xd6 && (dest[0] & 0xf0) == 0x70) { in apply_retpolines()
1030 * Rewrite the compiler generated return thunk tail-calls.
1053 for (; i < insn->length;) in patch_return()
1082 "missing return thunk: %pS-%pS: %*ph", in apply_returns()
1157 * Generated by: objtool --ibt
1171 poison_cfi(addr - 16); in apply_seal_endbr()
1201 func -= cfi_get_offset(); in cfi_get_func_hash()
1227 if (get_kernel_nofault(disp, func - 4)) in cfi_get_func_arity()
1231 return target - __bhi_args; in cfi_get_func_arity()
1241 * Re-hash the CFI hash with a boot-time seed while making sure the result is
1247 while (unlikely(__is_endbr(hash) || __is_endbr(-hash))) { in cfi_rehash()
1259 return -EINVAL; in cfi_parse_cmdline()
1282 pr_alert("CFI: mismatch non-fatal!\n"); in cfi_parse_cmdline()
1328 * endbr64                                   nopl -42(%rax)
1332 * movl $(-0x12345678),%r10d // 6            movl $0x12345678,%eax // 5
1333 * addl $-15(%r11),%r10d // 4                lea -0x10(%r11),%r11 // 4
1336 * 1: cs call __x86_indirect_thunk_r11 // 6  call *%r11; nop3; // 6
1340 * non-taken. This is based on Agner Fog's optimization manual, which states:
1343 * for not-taken branches is better than for taken branches on most
1344 * processors. Therefore, it is good to place the most frequent branch first"
1352 * 10: 0f 1f 40 d6 nopl -0x2a(%rax)
1354 * Note that the JNE target is the 0xD6 byte inside the NOPL, this decodes as
1362 " cs jne.d32 fineibt_preamble_start+0x13 \n"
1364 " nopl -42(%rax) \n"
1373 #define fineibt_preamble_size (fineibt_preamble_end - fineibt_preamble_start)
1374 #define fineibt_preamble_bhi (fineibt_preamble_bhi - fineibt_preamble_start)
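The note above about the JNE landing on the 0xD6 byte works because the displacement byte of nopl -42(%rax) is itself 0xD6, an opcode that is undefined in 64-bit mode and raises #UD. A small stand-alone check of just that encoding fact:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint8_t nopl[] = { 0x0f, 0x1f, 0x40, 0xd6 };	/* nopl -42(%rax) */

		assert((int8_t)nopl[3] == -42);	/* disp8 is the -42 from the mnemonic */
		assert(nopl[3] == 0xd6);	/* ...and the byte the JNE targets    */
		return 0;
	}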
1381 * 5: 4d 8d 5b f0 lea -0x10(%r11), %r11
1387 " lea -0x10(%r11), %r11 \n"
1396 #define fineibt_caller_size (fineibt_caller_end - fineibt_caller_start)
1399 #define fineibt_caller_jmp (fineibt_caller_size - 2)
1402 * Since FineIBT does hash validation on the callee side it is prone to
1404 * is not part of the fineibt_preamble sequence.
1415 * 5: 41 3b 43 f5 cmp -0x11(%r11), %eax
1416 * 9: 2e 4d 8d 5b <f0> cs lea -0x10(%r11), %r11
1422 * avoiding a dependency. Again, using a non-taken (backwards) branch
1429 " cmpl -11(%r11), %eax \n"
1430 " cs lea -0x10(%r11), %r11 \n"
1434 " cs call *%r11 \n"
1443 #define fineibt_paranoid_size (fineibt_paranoid_end - fineibt_paranoid_start)
1444 #define fineibt_paranoid_ind (fineibt_paranoid_ind - fineibt_paranoid_start)
1454 *reg = p[0] - 0xb8; in decode_preamble_hash()
1465 /* 41 ba 88 a9 cb ed mov $(-0x12345678),%r10d */ in decode_caller_hash()
1467 return -*(u32 *)(addr + 2); in decode_caller_hash()
1471 return -*(u32 *)(addr + 2); in decode_caller_hash()
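decode_preamble_hash() and decode_caller_hash() above lean on the "mov $imm32, %reg" encoding: opcode 0xb8 + reg (with REX.B for r8-r15) followed by the 32-bit immediate, which the kCFI caller stores negated. A stand-alone decode of the example bytes quoted on line 1465 (assumes a little-endian host, as on x86):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* 41 ba 88 a9 cb ed = movl $(-0x12345678), %r10d */
		const uint8_t caller[] = { 0x41, 0xba, 0x88, 0xa9, 0xcb, 0xed };
		uint32_t imm;

		assert(caller[1] - 0xb8 == 2);		/* register number: r10 (with REX.B) */

		memcpy(&imm, caller + 2, sizeof(imm));	/* little-endian immediate           */
		assert(imm == (uint32_t)-0x12345678);
		assert(-imm == 0x12345678u);		/* negate to recover the hash        */
		return 0;
	}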
1491 addr -= fineibt_caller_size; in cfi_disable_callers()
1505 * Re-enable kCFI, undo what cfi_disable_callers() did. in cfi_enable_callers()
1514 addr -= fineibt_caller_size; in cfi_enable_callers()
1537 return -EINVAL; in cfi_rand_preamble()
1547 * Inline the bhi-arity 1 case:
1556 * 10: 0f 1f 40 <d6> nopl -42(%rax)
1558 * Notably, this scheme is incompatible with permissive CFI
1559 * because the CMOVcc is unconditional and RDI will have been
1565 " cs jne fineibt_bhi1_func + 0x3 \n"
1567 " nopl -42(%rax) \n"
1575 #define fineibt_bhi1_size (fineibt_bhi1_end - fineibt_bhi1_start)
1598 * 9: 2e 2e e8 DD DD DD DD cs cs call __bhi_args[arity] in cfi_fineibt_bhi_preamble()
1629 return -EINVAL; in cfi_rewrite_preamble()
1669 addr -= fineibt_caller_size; in cfi_rand_callers()
1672 hash = -cfi_rehash(hash); in cfi_rand_callers()
1682 u8 *thunk = (void *)__x86_indirect_its_thunk_array[reg] - 2; in emit_paranoid_trampoline()
1705 addr -= fineibt_caller_size; in cfi_rewrite_callers()
1736 int len = fineibt_paranoid_size - fineibt_paranoid_ind; in cfi_rewrite_callers()
1769 * is less easy to take advantage of. in __apply_fineibt()
1815 pr_cfi_debug("CFI: re-enabling all indirect call checking\n"); in __apply_fineibt()
1827 /* place the FineIBT preamble at func()-16 */ in __apply_fineibt()
1832 /* rewrite the callers to target func()-16 */ in __apply_fineibt()
1867 * is never taken do not get a __cfi prefix, but *DO* get an ENDBR. in poison_cfi()
1882 * nopl -42(%rax) in poison_cfi()
1886 * nopl -42(%rax) in poison_cfi()
1912 #define fineibt_prefix_size (fineibt_preamble_size - ENDBR_INSN_SIZE)
1915 * When regs->ip points to a 0xD6 byte in the FineIBT preamble,
1923 unsigned long addr = regs->ip - fineibt_preamble_ud; in decode_fineibt_preamble()
1932 *type = (u32)regs->ax + hash; in decode_fineibt_preamble()
1935 * Since regs->ip points to the middle of an instruction; it cannot in decode_fineibt_preamble()
1938 regs->ip = *target; in decode_fineibt_preamble()
1947 * regs->ip points to one of the UD2 in __bhi_args[].
1957 if (regs->ip < (unsigned long)__bhi_args || in decode_fineibt_bhi()
1958 regs->ip >= (unsigned long)__bhi_args_end) in decode_fineibt_bhi()
1963 * FineIBT preamble. Since the CALL instruction is in the last 5 in decode_fineibt_bhi()
1964 * bytes of the preamble, the return address is in fact the target in decode_fineibt_bhi()
1967 __get_kernel_nofault(&addr, regs->sp, unsigned long, Efault); in decode_fineibt_bhi()
1970 addr -= fineibt_prefix_size; in decode_fineibt_bhi()
1975 *type = (u32)regs->ax + hash; in decode_fineibt_bhi()
1979 * as such the non-fatal case can use the regular fixup. in decode_fineibt_bhi()
1999 * regs->ip points to a LOCK Jcc.d8 instruction from the fineibt_paranoid_start[]
2004 unsigned long addr = regs->ip - fineibt_paranoid_ud; in decode_fineibt_paranoid()
2009 if (is_cfi_trap(addr + fineibt_caller_size - LEN_UD2)) { in decode_fineibt_paranoid()
2010 *target = regs->r11 + fineibt_prefix_size; in decode_fineibt_paranoid()
2011 *type = regs->ax; in decode_fineibt_paranoid()
2014 * Since the trapping instruction is the exact, but LOCK prefixed, in decode_fineibt_paranoid()
2024 * 5: 41 3b 43 f7 cmp -11(%r11), %eax in decode_fineibt_paranoid()
2025 * a: 2e 4d 8d 5b f0 cs lea -0x10(%r11), %r11 in decode_fineibt_paranoid()
2026 * e: 2e e8 XX XX XX XX cs call __x86_indirect_paranoid_thunk_r11 in decode_fineibt_paranoid()
2038 if (is_paranoid_thunk(regs->ip)) { in decode_fineibt_paranoid()
2039 *target = regs->r11 + fineibt_prefix_size; in decode_fineibt_paranoid()
2040 *type = regs->ax; in decode_fineibt_paranoid()
2042 regs->ip = *target; in decode_fineibt_paranoid()
2117 /* what is this ??? */
2154 smp->mod = mod; in alternatives_smp_module_add()
2155 smp->name = name; in alternatives_smp_module_add()
2156 smp->locks = locks; in alternatives_smp_module_add()
2157 smp->locks_end = locks_end; in alternatives_smp_module_add()
2158 smp->text = text; in alternatives_smp_module_add()
2159 smp->text_end = text_end; in alternatives_smp_module_add()
2160 DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n", in alternatives_smp_module_add()
2161 smp->locks, smp->locks_end, in alternatives_smp_module_add()
2162 smp->text, smp->text_end, smp->name); in alternatives_smp_module_add()
2164 list_add_tail(&smp->next, &smp_alt_modules); in alternatives_smp_module_add()
2177 if (mod != item->mod) in alternatives_smp_module_del()
2179 list_del(&item->next); in alternatives_smp_module_del()
2201 alternatives_smp_lock(mod->locks, mod->locks_end, in alternatives_enable_smp()
2202 mod->text, mod->text_end); in alternatives_enable_smp()
2209 * Return 1 if the address range is reserved for SMP-alternatives.
2222 if (mod->text > text_end || mod->text_end < text_start) in alternatives_text_reserved()
2224 for (poff = mod->locks; poff < mod->locks_end; poff++) { in alternatives_text_reserved()
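The continue-test above is the standard disjoint-interval check: the ranges miss each other only if one begins after the other ends. Tiny stand-alone illustration:

	#include <assert.h>
	#include <stdbool.h>

	static bool ranges_overlap(const void *a_start, const void *a_end,
				   const void *b_start, const void *b_end)
	{
		return !(a_start > b_end || a_end < b_start);
	}

	int main(void)
	{
		char text[100];

		assert(ranges_overlap(text, text + 49, text + 40, text + 80));
		assert(!ranges_overlap(text, text + 9, text + 40, text + 80));
		return 0;
	}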
2237 * Self-test for the INT3 based CALL emulation code.
2240 * properly and that there is a stack gap between the INT3 frame and the
2261 " .size int3_magic, .-int3_magic\n"
2272 struct pt_regs *regs = args->regs; in int3_exception_notify()
2282 if (regs->ip - INT3_INSN_SIZE != selftest) in int3_exception_notify()
2294 .priority = INT_MAX-1, /* last */ in int3_selftest()
2301 * Basically: int3_magic(&val); but really complicated :-) in int3_selftest()
2332 * instruction and additionally on x86_64 we get a RIP-relative LEA: in alt_reloc_selftest()
2335 * call +0 # 5d5: R_X86_64_PLT32 __alt_reloc_selftest-0x4 in alt_reloc_selftest()
2355 * The patching is not fully atomic, so try to avoid local in alternative_instructions()
2365 * Ignoring it is worse than an unlikely patching race. in alternative_instructions()
2378 /* Keep CET-IBT disabled until caller/callee are patched */ in alternative_instructions()
2395 * Adjust all CALL instructions to point to func()-10, including in alternative_instructions()
2432 * text_poke_early - Update instructions on a live kernel at boot time
2451 * Modules text is marked initially as non-executable, so the in text_poke_early()
2452 * code cannot be running and speculative code-fetches are in text_poke_early()
2497 * While boot memory allocator is running we cannot use struct pages as in __text_poke()
2498 * they are not yet initialized. There is no way to recover. in __text_poke()
2519 * Map the page without the global bit, as TLB flushing is done with in __text_poke()
2520 * flush_tlb_mm_range(), which is intended for non-global PTEs. in __text_poke()
2525 * The lock is not really needed, but this allows us to avoid open-coding. in __text_poke()
2546 * guarantees that the PTE will be set at the time memcpy() is done. in __text_poke()
2555 * Ensure that the PTE is only cleared after the instructions of memcpy in __text_poke()
2565 * Loading the previous page-table hierarchy requires a serializing in __text_poke()
2567 * Xen-PV is assumed to serialize execution in a similar manner. in __text_poke()
2573 * IRQs, but not if the mm is not used, as it is in this point. in __text_poke()
2581 * If the text does not match what we just wrote then something is in __text_poke()
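The comments above describe writing through a short-lived writable alias rather than ever making the kernel text mapping itself writable. As a rough user-space analogy (not the kernel mechanism, which uses a dedicated poking mm and flush_tlb_mm_range()), the same aliasing idea can be shown with a memfd mapped twice:

	#define _GNU_SOURCE
	#include <assert.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		int fd = memfd_create("fake_text", 0);

		assert(fd >= 0 && ftruncate(fd, page) == 0);

		/* "text": never writable, like the kernel image mapping */
		unsigned char *text = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);
		/* short-lived writable alias, like the temporary poking mapping */
		unsigned char *alias = mmap(NULL, page, PROT_READ | PROT_WRITE,
					    MAP_SHARED, fd, 0);
		assert(text != MAP_FAILED && alias != MAP_FAILED);

		memcpy(alias, "\x90\x90\xc3", 3);	/* "patch" via the alias  */
		munmap(alias, page);			/* tear the alias down    */

		assert(text[2] == 0xc3);		/* visible through "text" */
		munmap(text, page);
		close(fd);
		return 0;
	}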
2593 * text_poke - Update instructions on a live kernel
2603 * Note that the caller must ensure that if the modified code is part of a
2616 * text_poke_kgdb - Update instructions on a live kernel by kgdb
2626 * Context: should only be used by kgdb, which ensures no other core is running,
2647 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched); in text_poke_copy_locked()
2656 * text_poke_copy - Copy instructions into (an unused part of) RX memory
2676 * text_poke_set - memset into (an unused part of) RX memory
2681 * This is useful to overwrite unused regions of RX memory with illegal
2697 s = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(ptr), len - patched); in text_poke_set()
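Both text_poke_copy_locked() and text_poke_set() above chunk their work with PAGE_SIZE * 2 - offset_in_page(ptr) because the underlying poke maps at most two pages at a time, so each pass may write from the current pointer to the end of the second page. A stand-alone model of that arithmetic (local simplified stand-ins for the kernel's min_t()/offset_in_page()):

	#include <assert.h>
	#include <stddef.h>

	#define PAGE_SIZE		4096UL
	#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))
	#define min(a, b)		((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long ptr = 0x100ff0;	/* 16 bytes before a page boundary */
		size_t len = 10000, patched = 0;
		int passes = 0;

		while (patched < len) {
			size_t s = min(PAGE_SIZE * 2 - offset_in_page(ptr + patched),
				       len - patched);
			patched += s;
			passes++;
		}
		assert(patched == len);
		assert(passes == 2);	/* 4112 bytes in pass 1, 5888 in pass 2 */
		return 0;
	}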
2718 * this thing. When len == 6 everything is prefixed with 0x0f and we map
2766 return _stext + tpl->rel_addr; in text_poke_addr()
2772 return -1; in patch_cmp()
2789 * text_poke_array with non-zero refcount: in smp_text_poke_int3_handler()
2803 ip = (void *) regs->ip - INT3_INSN_SIZE; in smp_text_poke_int3_handler()
2806 * Skip the binary search if there is a single member in the vector. in smp_text_poke_int3_handler()
2820 ip += tpl->len; in smp_text_poke_int3_handler()
2822 switch (tpl->opcode) { in smp_text_poke_int3_handler()
2835 int3_emulate_call(regs, (long)ip + tpl->disp); in smp_text_poke_int3_handler()
2840 int3_emulate_jmp(regs, (long)ip + tpl->disp); in smp_text_poke_int3_handler()
2844 int3_emulate_jcc(regs, tpl->opcode & 0xf, (long)ip, tpl->disp); in smp_text_poke_int3_handler()
2859 * smp_text_poke_batch_finish() -- update instructions on live kernel on SMP
2865 * Modify multi-byte instructions by using INT3 breakpoints on SMP.
2867 * synchronization using INT3 breakpoints and SMP cross-calls.
2869 * The way it is done:
2870 * - For each entry in the vector:
2871 * - add an INT3 trap to the address that will be patched
2872 * - SMP sync all CPUs
2873 * - For each entry in the vector:
2874 * - update all but the first byte of the patched range
2875 * - SMP sync all CPUs
2876 * - For each entry in the vector:
2877 * - replace the first byte (INT3) by the first byte of the
2879 * - SMP sync all CPUs
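A sketch of the shape of that three-phase protocol over a plain byte buffer; sync_cores() below is an empty placeholder for the SMP cross-calls, and the INT3-trap emulation that covers the transient window is not modelled:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define INT3 0xcc

	struct patch { uint8_t *addr; const uint8_t *insn; size_t len; };

	static void sync_cores(void) { /* IPI-SYNC placeholder */ }

	static void poke_batch(struct patch *p, int nr)
	{
		int i;

		for (i = 0; i < nr; i++)	/* 1) arm every site with INT3 */
			p[i].addr[0] = INT3;
		sync_cores();

		for (i = 0; i < nr; i++)	/* 2) write everything but byte 0 */
			if (p[i].len > 1)
				memcpy(p[i].addr + 1, p[i].insn + 1, p[i].len - 1);
		sync_cores();

		for (i = 0; i < nr; i++)	/* 3) replace INT3 with the real first byte */
			p[i].addr[0] = p[i].insn[0];
		sync_cores();
	}

	int main(void)
	{
		uint8_t site[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	 /* old: NOP5  */
		const uint8_t jmp[5] = { 0xe9, 0x10, 0x00, 0x00, 0x00 };/* new: JMP32 */
		struct patch p = { site, jmp, 5 };

		poke_batch(&p, 1);
		return site[0] == 0xe9 ? 0 : 1;
	}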
2894 * ensure reading a non-zero refcount provides up to date text_poke_array data. in smp_text_poke_batch_finish()
2934 if (len - INT3_INSN_SIZE > 0) { in smp_text_poke_batch_finish()
2937 len - INT3_INSN_SIZE); in smp_text_poke_batch_finish()
2947 len - INT3_INSN_SIZE); in smp_text_poke_batch_finish()
2955 * to reconstruct the trace. The flow up to here is: in smp_text_poke_batch_finish()
2956 * - write INT3 byte in smp_text_poke_batch_finish()
2957 * - IPI-SYNC in smp_text_poke_batch_finish()
2958 * - write instruction tail in smp_text_poke_batch_finish()
2962 * can still be decoded. Subsequently: in smp_text_poke_batch_finish()
2963 * - emit RECORD_TEXT_POKE with the new instruction in smp_text_poke_batch_finish()
2964 * - IPI-SYNC in smp_text_poke_batch_finish()
2965 * - write first byte in smp_text_poke_batch_finish()
2966 * - IPI-SYNC in smp_text_poke_batch_finish()
2973 * The old instruction is recorded so that the event can be in smp_text_poke_batch_finish()
2981 * According to Intel, this core syncing is very likely in smp_text_poke_batch_finish()
3011 * Notably, if after step-3 above the INT3 got removed, then the in smp_text_poke_batch_finish()
3013 * handlers and the below spin-wait will not happen. in smp_text_poke_batch_finish()
3015 * IOW. unless the replacement instruction is INT3, this case goes in smp_text_poke_batch_finish()
3039 memcpy((void *)tpl->text, opcode+i, len-i); in __smp_text_poke_batch_add()
3046 tpl->rel_addr = addr - (void *)_stext; in __smp_text_poke_batch_add()
3047 tpl->len = len; in __smp_text_poke_batch_add()
3048 tpl->opcode = insn.opcode.bytes[0]; in __smp_text_poke_batch_add()
3054 tpl->opcode = insn.opcode.bytes[1] - 0x10; in __smp_text_poke_batch_add()
3057 switch (tpl->opcode) { in __smp_text_poke_batch_add()
3066 BUG_ON(tpl->text[i] != INT3_INSN_OPCODE); in __smp_text_poke_batch_add()
3073 switch (tpl->opcode) { in __smp_text_poke_batch_add()
3082 tpl->disp = insn.immediate.value; in __smp_text_poke_batch_add()
3087 case 2: /* NOP2 -- emulate as JMP8+0 */ in __smp_text_poke_batch_add()
3089 tpl->opcode = JMP8_INSN_OPCODE; in __smp_text_poke_batch_add()
3090 tpl->disp = 0; in __smp_text_poke_batch_add()
3093 case 5: /* NOP5 -- emulate as JMP32+0 */ in __smp_text_poke_batch_add()
3095 tpl->opcode = JMP32_INSN_OPCODE; in __smp_text_poke_batch_add()
3096 tpl->disp = 0; in __smp_text_poke_batch_add()
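Cases 2 and 5 above emulate a queued NOP as a jump with displacement 0 while the INT3 is armed. The arithmetic is the same as int3_emulate_jmp(): resume at end-of-range plus displacement, which for disp == 0 is simply the next instruction, i.e. NOP behaviour. Tiny stand-alone check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uintptr_t site = 0x1000;	/* address being patched    */
		int len = 5;			/* NOP5 emulated as JMP32+0 */
		int32_t disp = 0;

		uintptr_t resume = site + len + disp;	/* emulation math */

		assert(resume == site + len);	/* falls through, like a NOP */
		return 0;
	}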
3107 * We strictly rely on the text_poke_array.vec being ordered; ensure this is so by flushing
3118 * If the last current entry's address is higher than the in text_poke_addr_ordered()
3120 * is violated and we must first flush all pending patching in text_poke_addr_ordered()
3123 if (text_poke_addr(text_poke_array.vec + text_poke_array.nr_entries-1) > addr) in text_poke_addr_ordered()
3130 * smp_text_poke_batch_add() -- update instruction on live kernel on SMP, batched
3136 * Add a new instruction to the current queue of to-be-patched instructions
3150 * smp_text_poke_single() -- update instruction on live kernel on SMP immediately
3157 * dynamically allocated memory. This function should be used when it is
3159 * is patched in immediately.