//===-- xray_AArch64.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Implementation of AArch64-specific routines (64-bit).
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "xray_defs.h"
#include "xray_interface_internal.h"
#include <atomic>
#include <cassert>

extern "C" void __clear_cache(void *start, void *end);

namespace __xray {

// The machine codes for some instructions used in runtime patching.
enum class PatchOpcodes : uint32_t {
  PO_StpX0X30SP_m16e = 0xA9BF7BE0, // STP X0, X30, [SP, #-16]!
  PO_LdrX16_12 = 0x58000070,       // LDR X16, #12
  PO_BlrX16 = 0xD63F0200,          // BLR X16
  PO_LdpX0X30SP_16 = 0xA8C17BE0,   // LDP X0, X30, [SP], #16
  PO_B32 = 0x14000008              // B #32
};

inline static bool patchSled(const bool Enable, const uint32_t FuncId,
                             const XRaySledEntry &Sled,
                             void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
  // When |Enable| == true, we replace the following compile-time stub (sled):
  //
  // xray_sled_n:
  //   B #32
  //   7 NOPs (28 bytes)
  //
  // with the following runtime patch:
  //
  // xray_sled_n:
  //   STP X0, X30, [SP, #-16]! ; PUSH {x0, lr}
  //   LDR W17, #12             ; W17 := function ID
  //   LDR X16, #12             ; X16 := address of the trampoline
  //   BLR X16
  //   ;DATA: 32 bits of function ID
  //   ;DATA: lower 32 bits of the address of the trampoline
  //   ;DATA: higher 32 bits of the address of the trampoline
  //   LDP X0, X30, [SP], #16   ; POP {x0, lr}
  //
  // The replacement of the first 4-byte instruction must be the last step and
  // must be atomic, so that user code reaching the sled concurrently either
  // jumps over the whole sled or executes the whole sled once it is ready.
  //
  // When |Enable| == false, we set the first instruction in the sled back to
  //   B #32
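  //
  // Slot layout of the patched sled (eight 4-byte words from Sled.address()):
  //   [0] STP   [1] LDR W17   [2] LDR X16   [3] BLR X16
  //   [4] FuncId   [5-6] 64-bit trampoline address   [7] LDP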

  uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.address());
  uint32_t *CurAddress = FirstAddress + 1;
  if (Enable) {
    *CurAddress++ = 0x18000071; // ldr w17, #12
    *CurAddress = uint32_t(PatchOpcodes::PO_LdrX16_12);
    CurAddress++;
    *CurAddress = uint32_t(PatchOpcodes::PO_BlrX16);
    CurAddress++;
    *CurAddress = FuncId;
    CurAddress++;
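    // The next two 32-bit slots hold the 64-bit address of the tracing hook.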
    *reinterpret_cast<void (**)()>(CurAddress) = TracingHook;
    CurAddress += 2;
    *CurAddress = uint32_t(PatchOpcodes::PO_LdpX0X30SP_16);
    CurAddress++;
    std::atomic_store_explicit(
        reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
        uint32_t(PatchOpcodes::PO_StpX0X30SP_m16e), std::memory_order_release);
  } else {
    std::atomic_store_explicit(
        reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
        uint32_t(PatchOpcodes::PO_B32), std::memory_order_release);
  }
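  // Synchronize the instruction cache with the newly written code over the
  // patched range.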
  __clear_cache(reinterpret_cast<char *>(FirstAddress),
                reinterpret_cast<char *>(CurAddress));
  return true;
}

bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
                        const XRaySledEntry &Sled,
                        void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
  return patchSled(Enable, FuncId, Sled, Trampoline);
}

bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
                       const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
}

bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
                           const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
  return patchSled(Enable, FuncId, Sled, __xray_FunctionTailExit);
}

// AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL generates this code sequence:
//
// .Lxray_event_sled_N:
//   b 1f
//   save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
//   set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
//   bl __xray_CustomEvent or __xray_TypedEvent
//   restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
// 1:
//
// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
//
// Enable: b .+24 => nop
// Disable: nop => b .+24
bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
                      const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
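  // 0xd503201f encodes NOP; 0x14000006 encodes B #24, which branches over the
  // six-instruction event sled.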
  uint32_t Inst = Enable ? 0xd503201f : 0x14000006;
  std::atomic_store_explicit(
      reinterpret_cast<std::atomic<uint32_t> *>(Sled.address()), Inst,
      std::memory_order_release);
  return false;
}

// Enable: b .+36 => nop
// Disable: nop => b .+36
bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
                     const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
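  // 0xd503201f encodes NOP; 0x14000009 encodes B #36, which branches over the
  // nine-instruction typed event sled.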
  uint32_t Inst = Enable ? 0xd503201f : 0x14000009;
  std::atomic_store_explicit(
      reinterpret_cast<std::atomic<uint32_t> *>(Sled.address()), Inst,
      std::memory_order_release);
  return false;
}

// FIXME: Maybe implement this better?
bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }

} // namespace __xray