xref: /freebsd/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===-- xray_interface.cpp --------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of XRay, a dynamic runtime instrumentation system.
10 //
11 // Implementation of the API functions.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "xray_interface_internal.h"
16 
17 #include <cinttypes>
18 #include <cstdio>
19 #include <errno.h>
20 #include <limits>
21 #include <string.h>
22 #include <sys/mman.h>
23 
24 #if SANITIZER_FUCHSIA
25 #include <zircon/process.h>
26 #include <zircon/sanitizer.h>
27 #include <zircon/status.h>
28 #include <zircon/syscalls.h>
29 #endif
30 
31 #include "sanitizer_common/sanitizer_addrhashmap.h"
32 #include "sanitizer_common/sanitizer_common.h"
33 
34 #include "xray_defs.h"
35 #include "xray_flags.h"
36 
37 extern __sanitizer::SpinMutex XRayInstrMapMutex;
38 extern __sanitizer::atomic_uint8_t XRayInitialized;
39 extern __xray::XRaySledMap *XRayInstrMaps;
40 extern __sanitizer::atomic_uint32_t XRayNumObjects;
41 
42 namespace __xray {
43 
// Per-architecture size (in bytes) of a single XRay instrumentation sled.
// This is added to the highest sled address when computing the length of the
// region handed to mprotect()/zx code-protection below, so the final sled is
// fully covered by the writable mapping.
#if defined(__x86_64__)
static const int16_t cSledLength = 12;
#elif defined(__aarch64__)
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
#elif SANITIZER_LOONGARCH64
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS32
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS64
static const int16_t cSledLength = 64;
#elif defined(__powerpc64__)
static const int16_t cSledLength = 8;
#elif defined(__hexagon__)
static const int16_t cSledLength = 20;
#elif defined(__riscv) && (__riscv_xlen == 64)
static const int16_t cSledLength = 68;
#elif defined(__riscv) && (__riscv_xlen == 32)
static const int16_t cSledLength = 52;
#elif defined(__s390x__)
static const int16_t cSledLength = 18;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */
69 
// This is the function to call when we encounter the entry or exit sleds.
atomic_uintptr_t XRayPatchedFunction SANITIZER_INTERFACE_ATTRIBUTE{0};

// This is the function to call from the arg1-enabled sleds/trampolines.
atomic_uintptr_t XRayArgLogger SANITIZER_INTERFACE_ATTRIBUTE{0};

// This is the function to call when we encounter a custom event log call.
atomic_uintptr_t XRayPatchedCustomEvent SANITIZER_INTERFACE_ATTRIBUTE{0};

// This is the function to call when we encounter a typed event log call.
atomic_uintptr_t XRayPatchedTypedEvent SANITIZER_INTERFACE_ATTRIBUTE{0};

// This is the global status to determine whether we are currently
// patching/unpatching. Set via compare-and-swap by whichever thread wins the
// race to patch; cleared when that thread finishes.
atomic_uint8_t XRayPatching{0};

// Bookkeeping for a typed-event descriptor registered through
// __xray_register_event_type().
struct TypeDescription {
  // Unique id handed out at first registration of the descriptor address.
  uint32_t type_id;
  // Length of the descriptor string, capped at 1024 (see strnlen call in
  // __xray_register_event_type).
  std::size_t description_string_length;
};

using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
// An address map from immutable descriptors to type ids.
TypeDescriptorMapType TypeDescriptorAddressMap{};

// Monotonically increasing source of fresh type ids.
atomic_uint32_t TypeEventDescriptorCounter{0};
96 
97 // MProtectHelper is an RAII wrapper for calls to mprotect(...) that will
98 // undo any successful mprotect(...) changes. This is used to make a page
99 // writeable and executable, and upon destruction if it was successful in
100 // doing so returns the page into a read-only and executable page.
101 //
102 // This is only used specifically for runtime-patching of the XRay
103 // instrumentation points. This assumes that the executable pages are
104 // originally read-and-execute only.
105 class MProtectHelper {
106   void *PageAlignedAddr;
107   std::size_t MProtectLen;
108   bool MustCleanup;
109 
110 public:
MProtectHelper(void * PageAlignedAddr,std::size_t MProtectLen,std::size_t PageSize)111   explicit MProtectHelper(void *PageAlignedAddr,
112                           std::size_t MProtectLen,
113                           std::size_t PageSize) XRAY_NEVER_INSTRUMENT
114       : PageAlignedAddr(PageAlignedAddr),
115         MProtectLen(MProtectLen),
116         MustCleanup(false) {
117 #if SANITIZER_FUCHSIA
118     MProtectLen = RoundUpTo(MProtectLen, PageSize);
119 #endif
120   }
121 
MakeWriteable()122   int MakeWriteable() XRAY_NEVER_INSTRUMENT {
123 #if SANITIZER_FUCHSIA
124     auto R = __sanitizer_change_code_protection(
125         reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, true);
126     if (R != ZX_OK) {
127       Report("XRay: cannot change code protection: %s\n",
128              _zx_status_get_string(R));
129       return -1;
130     }
131     MustCleanup = true;
132     return 0;
133 #else
134     auto R = mprotect(PageAlignedAddr, MProtectLen,
135                       PROT_READ | PROT_WRITE | PROT_EXEC);
136     if (R != -1)
137       MustCleanup = true;
138     return R;
139 #endif
140   }
141 
~MProtectHelper()142   ~MProtectHelper() XRAY_NEVER_INSTRUMENT {
143     if (MustCleanup) {
144 #if SANITIZER_FUCHSIA
145       auto R = __sanitizer_change_code_protection(
146           reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, false);
147       if (R != ZX_OK) {
148         Report("XRay: cannot change code protection: %s\n",
149                _zx_status_get_string(R));
150       }
151 #else
152       mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
153 #endif
154     }
155   }
156 };
157 
158 namespace {
159 
isObjectLoaded(int32_t ObjId)160 bool isObjectLoaded(int32_t ObjId) {
161   SpinMutexLock Guard(&XRayInstrMapMutex);
162   if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
163                        atomic_load(&XRayNumObjects, memory_order_acquire)) {
164     return false;
165   }
166   return XRayInstrMaps[ObjId].Loaded;
167 }
168 
patchSled(const XRaySledEntry & Sled,bool Enable,int32_t FuncId,const XRayTrampolines & Trampolines)169 bool patchSled(const XRaySledEntry &Sled, bool Enable, int32_t FuncId,
170                const XRayTrampolines &Trampolines) XRAY_NEVER_INSTRUMENT {
171   bool Success = false;
172   switch (Sled.Kind) {
173   case XRayEntryType::ENTRY:
174     Success = patchFunctionEntry(Enable, FuncId, Sled, Trampolines,
175                                  /*LogArgs=*/false);
176     break;
177   case XRayEntryType::EXIT:
178     Success = patchFunctionExit(Enable, FuncId, Sled, Trampolines);
179     break;
180   case XRayEntryType::TAIL:
181     Success = patchFunctionTailExit(Enable, FuncId, Sled, Trampolines);
182     break;
183   case XRayEntryType::LOG_ARGS_ENTRY:
184     Success = patchFunctionEntry(Enable, FuncId, Sled, Trampolines,
185                                  /*LogArgs=*/true);
186     break;
187   case XRayEntryType::CUSTOM_EVENT:
188     Success = patchCustomEvent(Enable, FuncId, Sled);
189     break;
190   case XRayEntryType::TYPED_EVENT:
191     Success = patchTypedEvent(Enable, FuncId, Sled);
192     break;
193   default:
194     Report("Unsupported sled kind '%" PRIu64 "' @%04x\n", Sled.Address,
195            int(Sled.Kind));
196     return false;
197   }
198   return Success;
199 }
200 
201 const XRayFunctionSledIndex
findFunctionSleds(int32_t FuncId,const XRaySledMap & InstrMap)202 findFunctionSleds(int32_t FuncId,
203                   const XRaySledMap &InstrMap) XRAY_NEVER_INSTRUMENT {
204   int32_t CurFn = 0;
205   uint64_t LastFnAddr = 0;
206   XRayFunctionSledIndex Index = {nullptr, 0};
207 
208   for (std::size_t I = 0; I < InstrMap.Entries && CurFn <= FuncId; I++) {
209     const auto &Sled = InstrMap.Sleds[I];
210     const auto Function = Sled.function();
211     if (Function != LastFnAddr) {
212       CurFn++;
213       LastFnAddr = Function;
214     }
215 
216     if (CurFn == FuncId) {
217       if (Index.Begin == nullptr)
218         Index.Begin = &Sled;
219       Index.Size = &Sled - Index.Begin + 1;
220     }
221   }
222 
223   return Index;
224 }
225 
patchFunction(int32_t FuncId,int32_t ObjId,bool Enable)226 XRayPatchingStatus patchFunction(int32_t FuncId, int32_t ObjId,
227                                  bool Enable) XRAY_NEVER_INSTRUMENT {
228   if (!atomic_load(&XRayInitialized, memory_order_acquire))
229     return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
230 
231   uint8_t NotPatching = false;
232   if (!atomic_compare_exchange_strong(
233           &XRayPatching, &NotPatching, true, memory_order_acq_rel))
234     return XRayPatchingStatus::ONGOING; // Already patching.
235 
236   // Next, we look for the function index.
237   XRaySledMap InstrMap;
238   {
239     SpinMutexLock Guard(&XRayInstrMapMutex);
240     if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
241                          atomic_load(&XRayNumObjects, memory_order_acquire)) {
242       Report("Unable to patch function: invalid sled map index: %d", ObjId);
243       return XRayPatchingStatus::FAILED;
244     }
245     InstrMap = XRayInstrMaps[ObjId];
246   }
247 
248   // If we don't have an index, we can't patch individual functions.
249   if (InstrMap.Functions == 0)
250     return XRayPatchingStatus::NOT_INITIALIZED;
251 
252   // Check if the corresponding DSO has been unloaded.
253   if (!InstrMap.Loaded) {
254     Report("Invalid function id provided: %d\n", FuncId);
255     return XRayPatchingStatus::NOT_INITIALIZED;
256   }
257 
258   // FuncId must be a positive number, less than the number of functions
259   // instrumented.
260   if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
261     Report("Invalid function id provided: %d\n", FuncId);
262     return XRayPatchingStatus::FAILED;
263   }
264 
265   auto PackedId = __xray::MakePackedId(FuncId, ObjId);
266 
267   // Now we patch ths sleds for this specific function.
268   XRayFunctionSledIndex SledRange;
269   if (InstrMap.SledsIndex) {
270     SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
271                  InstrMap.SledsIndex[FuncId - 1].Size};
272   } else {
273     SledRange = findFunctionSleds(FuncId, InstrMap);
274   }
275 
276   auto *f = SledRange.Begin;
277   bool SucceedOnce = false;
278   for (size_t i = 0; i != SledRange.Size; ++i)
279     SucceedOnce |= patchSled(f[i], Enable, PackedId, InstrMap.Trampolines);
280 
281   atomic_store(&XRayPatching, false, memory_order_release);
282 
283   if (!SucceedOnce) {
284     Report("Failed patching any sled for function '%d'.", FuncId);
285     return XRayPatchingStatus::FAILED;
286   }
287 
288   return XRayPatchingStatus::SUCCESS;
289 }
290 
291 // controlPatching implements the common internals of the patching/unpatching
292 // implementation. |Enable| defines whether we're enabling or disabling the
293 // runtime XRay instrumentation.
294 // This function should only be called after ensuring that XRay is initialized
295 // and no other thread is currently patching.
XRayPatchingStatus controlPatchingObjectUnchecked(bool Enable, int32_t ObjId) {
  // Take a snapshot of the object's sled map under the lock; all further work
  // uses the copy so we don't hold the spin lock while patching.
  XRaySledMap InstrMap;
  {
    SpinMutexLock Guard(&XRayInstrMapMutex);
    if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
                         atomic_load(&XRayNumObjects, memory_order_acquire)) {
      Report("Unable to patch functions: invalid sled map index: %d\n", ObjId);
      return XRayPatchingStatus::FAILED;
    }
    InstrMap = XRayInstrMaps[ObjId];
  }
  // No sleds registered: nothing to patch for this object.
  if (InstrMap.Entries == 0)
    return XRayPatchingStatus::NOT_INITIALIZED;

  if (Verbosity())
    // NOTE(review): this prints the sled-entry count, not the function count,
    // and Entries' type may not match %d — confirm and consider rewording.
    Report("Patching object %d with %d functions.\n", ObjId, InstrMap.Entries);

  // Check if the corresponding DSO has been unloaded.
  if (!InstrMap.Loaded) {
    Report("Object is not loaded at index: %d\n", ObjId);
    return XRayPatchingStatus::FAILED;
  }

  // Function ids are 1-based; CurFun tracks the current function's address so
  // we can detect the boundary between consecutive functions' sleds.
  uint32_t FuncId = 1;
  uint64_t CurFun = 0;

  // First we want to find the bounds for which we have instrumentation points,
  // and try to get as few calls to mprotect(...) as possible. We're assuming
  // that all the sleds for the instrumentation map are contiguous as a single
  // set of pages. When we do support dynamic shared object instrumentation,
  // we'll need to do this for each set of page load offsets per DSO loaded. For
  // now we're assuming we can mprotect the whole section of text between the
  // minimum sled address and the maximum sled address (+ the largest sled
  // size).
  auto *MinSled = &InstrMap.Sleds[0];
  auto *MaxSled = &InstrMap.Sleds[InstrMap.Entries - 1];
  for (std::size_t I = 0; I < InstrMap.Entries; I++) {
    const auto &Sled = InstrMap.Sleds[I];
    if (Sled.address() < MinSled->address())
      MinSled = &Sled;
    if (Sled.address() > MaxSled->address())
      MaxSled = &Sled;
  }

  // Allow tests to override the page size; otherwise use the cached system
  // page size. A non-power-of-two page size would break the mask below.
  const size_t PageSize = flags()->xray_page_size_override > 0
                              ? flags()->xray_page_size_override
                              : GetPageSizeCached();
  if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
    Report("System page size is not a power of two: %zu\n", PageSize);
    return XRayPatchingStatus::FAILED;
  }

  // Round the lowest sled address down to a page boundary and cover through
  // the end of the highest sled (MaxSled + cSledLength).
  void *PageAlignedAddr =
      reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
  size_t MProtectLen =
      (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
      cSledLength;
  MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
  if (Protector.MakeWriteable() == -1) {
    Report("Failed mprotect: %d\n", errno);
    return XRayPatchingStatus::FAILED;
  }

  // Walk every sled, bumping FuncId whenever the owning function changes, and
  // patch each one with the packed (object, function) id.
  for (std::size_t I = 0; I < InstrMap.Entries; ++I) {
    auto &Sled = InstrMap.Sleds[I];
    auto F = Sled.function();
    if (CurFun == 0)
      CurFun = F;
    if (F != CurFun) {
      ++FuncId;
      CurFun = F;
    }
    auto PackedId = __xray::MakePackedId(FuncId, ObjId);
    patchSled(Sled, Enable, PackedId, InstrMap.Trampolines);
  }
  // NOTE(review): callers (controlPatching) also clear this flag via an
  // at_scope_exit resetter, so this store looks redundant — confirm.
  atomic_store(&XRayPatching, false, memory_order_release);
  return XRayPatchingStatus::SUCCESS;
}
374 
375 // Controls patching for all registered objects.
376 // Returns: SUCCESS, if patching succeeds for all objects.
377 //          NOT_INITIALIZED, if one or more objects returned NOT_INITIALIZED
378 //             but none failed.
379 //          FAILED, if patching of one or more objects failed.
controlPatching(bool Enable)380 XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
381   if (!atomic_load(&XRayInitialized, memory_order_acquire))
382     return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
383 
384   uint8_t NotPatching = false;
385   if (!atomic_compare_exchange_strong(&XRayPatching, &NotPatching, true,
386                                       memory_order_acq_rel))
387     return XRayPatchingStatus::ONGOING; // Already patching.
388 
389   auto XRayPatchingStatusResetter = at_scope_exit(
390       [] { atomic_store(&XRayPatching, false, memory_order_release); });
391 
392   unsigned NumObjects = __xray_num_objects();
393 
394   XRayPatchingStatus CombinedStatus{NOT_INITIALIZED};
395   for (unsigned I = 0; I < NumObjects; ++I) {
396     if (!isObjectLoaded(I))
397       continue;
398     auto LastStatus = controlPatchingObjectUnchecked(Enable, I);
399     switch (LastStatus) {
400     case SUCCESS:
401       if (CombinedStatus == NOT_INITIALIZED)
402         CombinedStatus = SUCCESS;
403       break;
404     case FAILED:
405       // Report failure, but try to patch the remaining objects
406       CombinedStatus = FAILED;
407       break;
408     case NOT_INITIALIZED:
409       // XRay has been initialized but there are no sleds available for this
410       // object. Try to patch remaining objects.
411       if (CombinedStatus != FAILED)
412         CombinedStatus = NOT_INITIALIZED;
413       break;
414     case ONGOING:
415       UNREACHABLE("Status ONGOING should not appear at this point");
416     }
417   }
418   return CombinedStatus;
419 }
420 
421 // Controls patching for one object.
controlPatching(bool Enable,int32_t ObjId)422 XRayPatchingStatus controlPatching(bool Enable,
423                                    int32_t ObjId) XRAY_NEVER_INSTRUMENT {
424 
425   if (!atomic_load(&XRayInitialized, memory_order_acquire))
426     return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
427 
428   uint8_t NotPatching = false;
429   if (!atomic_compare_exchange_strong(&XRayPatching, &NotPatching, true,
430                                       memory_order_acq_rel))
431     return XRayPatchingStatus::ONGOING; // Already patching.
432 
433   auto XRayPatchingStatusResetter = at_scope_exit(
434       [] { atomic_store(&XRayPatching, false, memory_order_release); });
435 
436   return controlPatchingObjectUnchecked(Enable, ObjId);
437 }
438 
mprotectAndPatchFunction(int32_t FuncId,int32_t ObjId,bool Enable)439 XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId, int32_t ObjId,
440                                             bool Enable) XRAY_NEVER_INSTRUMENT {
441   XRaySledMap InstrMap;
442   {
443     SpinMutexLock Guard(&XRayInstrMapMutex);
444     if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
445                          atomic_load(&XRayNumObjects, memory_order_acquire)) {
446       Report("Unable to patch function: invalid sled map index: %d\n", ObjId);
447       return XRayPatchingStatus::FAILED;
448     }
449     InstrMap = XRayInstrMaps[ObjId];
450   }
451 
452   // Check if the corresponding DSO has been unloaded.
453   if (!InstrMap.Loaded) {
454     Report("Object is not loaded at index: %d\n", ObjId);
455     return XRayPatchingStatus::FAILED;
456   }
457 
458   // FuncId must be a positive number, less than the number of functions
459   // instrumented.
460   if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
461     Report("Invalid function id provided: %d\n", FuncId);
462     return XRayPatchingStatus::FAILED;
463   }
464 
465   const size_t PageSize = flags()->xray_page_size_override > 0
466                               ? flags()->xray_page_size_override
467                               : GetPageSizeCached();
468   if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
469     Report("Provided page size is not a power of two: %zu\n", PageSize);
470     return XRayPatchingStatus::FAILED;
471   }
472 
473   // Here we compute the minimum sled and maximum sled associated with a
474   // particular function ID.
475   XRayFunctionSledIndex SledRange;
476   if (InstrMap.SledsIndex) {
477     SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
478                  InstrMap.SledsIndex[FuncId - 1].Size};
479   } else {
480     SledRange = findFunctionSleds(FuncId, InstrMap);
481   }
482   auto *f = SledRange.Begin;
483   auto *e = SledRange.Begin + SledRange.Size;
484   auto *MinSled = f;
485   auto *MaxSled = e - 1;
486   while (f != e) {
487     if (f->address() < MinSled->address())
488       MinSled = f;
489     if (f->address() > MaxSled->address())
490       MaxSled = f;
491     ++f;
492   }
493 
494   void *PageAlignedAddr =
495       reinterpret_cast<void *>(MinSled->address() & ~(PageSize - 1));
496   size_t MProtectLen =
497       (MaxSled->address() - reinterpret_cast<uptr>(PageAlignedAddr)) +
498       cSledLength;
499   MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
500   if (Protector.MakeWriteable() == -1) {
501     Report("Failed mprotect: %d\n", errno);
502     return XRayPatchingStatus::FAILED;
503   }
504   return patchFunction(FuncId, ObjId, Enable);
505 }
506 
507 } // namespace
508 
509 } // namespace __xray
510 
511 using namespace __xray;
512 
513 // The following functions are declared `extern "C" {...}` in the header, hence
514 // they're defined in the global namespace.
515 
__xray_set_handler(void (* entry)(int32_t,XRayEntryType))516 int __xray_set_handler(void (*entry)(int32_t,
517                                      XRayEntryType)) XRAY_NEVER_INSTRUMENT {
518   if (atomic_load(&XRayInitialized, memory_order_acquire)) {
519 
520     atomic_store(&__xray::XRayPatchedFunction,
521                  reinterpret_cast<uintptr_t>(entry), memory_order_release);
522     return 1;
523   }
524   return 0;
525 }
526 
__xray_set_customevent_handler(void (* entry)(void *,size_t))527 int __xray_set_customevent_handler(void (*entry)(void *, size_t))
528     XRAY_NEVER_INSTRUMENT {
529   if (atomic_load(&XRayInitialized, memory_order_acquire)) {
530     atomic_store(&__xray::XRayPatchedCustomEvent,
531                  reinterpret_cast<uintptr_t>(entry), memory_order_release);
532     return 1;
533   }
534   return 0;
535 }
536 
__xray_set_typedevent_handler(void (* entry)(size_t,const void *,size_t))537 int __xray_set_typedevent_handler(void (*entry)(size_t, const void *,
538                                                 size_t)) XRAY_NEVER_INSTRUMENT {
539   if (atomic_load(&XRayInitialized, memory_order_acquire)) {
540     atomic_store(&__xray::XRayPatchedTypedEvent,
541                  reinterpret_cast<uintptr_t>(entry), memory_order_release);
542     return 1;
543   }
544   return 0;
545 }
546 
int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
  // Removal is expressed as installing a null handler.
  return __xray_set_handler(nullptr);
}

int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
  // Removal is expressed as installing a null handler.
  return __xray_set_customevent_handler(nullptr);
}

int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT {
  // Removal is expressed as installing a null handler.
  return __xray_set_typedevent_handler(nullptr);
}
558 
__xray_register_event_type(const char * const event_type)559 uint16_t __xray_register_event_type(
560     const char *const event_type) XRAY_NEVER_INSTRUMENT {
561   TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
562   if (h.created()) {
563     h->type_id = atomic_fetch_add(
564         &TypeEventDescriptorCounter, 1, memory_order_acq_rel);
565     h->description_string_length = strnlen(event_type, 1024);
566   }
567   return h->type_id;
568 }
569 
XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
  // Enable instrumentation in every registered object.
  return controlPatching(true);
}

XRayPatchingStatus __xray_patch_object(int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  // Enable instrumentation in the single object identified by ObjId.
  return controlPatching(true, ObjId);
}

XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
  // Restore the original (unpatched) sleds in every registered object.
  return controlPatching(false);
}

XRayPatchingStatus __xray_unpatch_object(int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  // Restore the original sleds in the single object identified by ObjId.
  return controlPatching(false, ObjId);
}
585 
__xray_patch_function(int32_t FuncId)586 XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
587   auto Ids = __xray::UnpackId(FuncId);
588   auto ObjId = Ids.first;
589   auto FnId = Ids.second;
590   return mprotectAndPatchFunction(FnId, ObjId, true);
591 }
592 
XRayPatchingStatus
__xray_patch_function_in_object(int32_t FuncId,
                                int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  // Here FuncId is a plain (non-packed) id relative to the object ObjId.
  return mprotectAndPatchFunction(FuncId, ObjId, true);
}
598 
599 XRayPatchingStatus
__xray_unpatch_function(int32_t FuncId)600 __xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
601   auto Ids = __xray::UnpackId(FuncId);
602   auto ObjId = Ids.first;
603   auto FnId = Ids.second;
604   return mprotectAndPatchFunction(FnId, ObjId, false);
605 }
606 
XRayPatchingStatus
__xray_unpatch_function_in_object(int32_t FuncId,
                                  int32_t ObjId) XRAY_NEVER_INSTRUMENT {
  // Here FuncId is a plain (non-packed) id relative to the object ObjId.
  return mprotectAndPatchFunction(FuncId, ObjId, false);
}
612 
__xray_set_handler_arg1(void (* entry)(int32_t,XRayEntryType,uint64_t))613 int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
614   if (!atomic_load(&XRayInitialized, memory_order_acquire))
615     return 0;
616 
617   // A relaxed write might not be visible even if the current thread gets
618   // scheduled on a different CPU/NUMA node.  We need to wait for everyone to
619   // have this handler installed for consistency of collected data across CPUs.
620   atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
621                memory_order_release);
622   return 1;
623 }
624 
__xray_remove_handler_arg1()625 int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
626 
627 uintptr_t
__xray_function_address(int32_t CombinedFuncId)628 __xray_function_address(int32_t CombinedFuncId) XRAY_NEVER_INSTRUMENT {
629   auto Ids = __xray::UnpackId(CombinedFuncId);
630   return __xray_function_address_in_object(Ids.second, Ids.first);
631 }
632 
__xray_function_address_in_object(int32_t FuncId,int32_t ObjId)633 uintptr_t __xray_function_address_in_object(int32_t FuncId, int32_t ObjId)
634     XRAY_NEVER_INSTRUMENT {
635   XRaySledMap InstrMap;
636   {
637     SpinMutexLock Guard(&XRayInstrMapMutex);
638     auto count = atomic_load(&XRayNumObjects, memory_order_acquire);
639     if (ObjId < 0 || static_cast<uint32_t>(ObjId) >= count) {
640       Report("Unable to determine function address: invalid sled map index %d "
641              "(size is %d)\n",
642              ObjId, (int)count);
643       return 0;
644     }
645     InstrMap = XRayInstrMaps[ObjId];
646   }
647 
648   if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions)
649     return 0;
650   const XRaySledEntry *Sled =
651       InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1].fromPCRelative()
652                           : findFunctionSleds(FuncId, InstrMap).Begin;
653   return Sled->function()
654 // On PPC, function entries are always aligned to 16 bytes. The beginning of a
655 // sled might be a local entry, which is always +8 based on the global entry.
656 // Always return the global entry.
657 #ifdef __PPC__
658          & ~0xf
659 #endif
660       ;
661 }
662 
size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
  // Object 0 is the main executable.
  return __xray_max_function_id_in_object(0);
}
666 
__xray_max_function_id_in_object(int32_t ObjId)667 size_t __xray_max_function_id_in_object(int32_t ObjId) XRAY_NEVER_INSTRUMENT {
668   SpinMutexLock Guard(&XRayInstrMapMutex);
669   if (ObjId < 0 || static_cast<uint32_t>(ObjId) >=
670                        atomic_load(&XRayNumObjects, memory_order_acquire))
671     return 0;
672   return XRayInstrMaps[ObjId].Functions;
673 }
674 
size_t __xray_num_objects() XRAY_NEVER_INSTRUMENT {
  // Number of objects (main executable plus DSOs) currently registered.
  SpinMutexLock Guard(&XRayInstrMapMutex);
  return atomic_load(&XRayNumObjects, memory_order_acquire);
}
679 
int32_t __xray_unpack_function_id(int32_t PackedId) {
  // Extract the function-id half of a packed (object, function) id.
  return __xray::UnpackId(PackedId).second;
}

int32_t __xray_unpack_object_id(int32_t PackedId) {
  // Extract the object-id half of a packed (object, function) id.
  return __xray::UnpackId(PackedId).first;
}

int32_t __xray_pack_id(int32_t FuncId, int32_t ObjId) {
  // Combine an object id and a function id into the packed representation
  // consumed by the single-argument patching/lookup APIs.
  return __xray::MakePackedId(FuncId, ObjId);
}
691