xref: /freebsd/contrib/llvm-project/lldb/source/Symbol/FuncUnwinders.cpp (revision 9c77fb6aaa366cbabc80ee1b834bcfe4df135491)
1 //===-- FuncUnwinders.cpp -------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "lldb/Symbol/FuncUnwinders.h"
10 #include "lldb/Core/Address.h"
11 #include "lldb/Core/AddressRange.h"
12 #include "lldb/Symbol/ArmUnwindInfo.h"
13 #include "lldb/Symbol/CallFrameInfo.h"
14 #include "lldb/Symbol/CompactUnwindInfo.h"
15 #include "lldb/Symbol/DWARFCallFrameInfo.h"
16 #include "lldb/Symbol/ObjectFile.h"
17 #include "lldb/Symbol/SymbolFile.h"
18 #include "lldb/Symbol/UnwindPlan.h"
19 #include "lldb/Symbol/UnwindTable.h"
20 #include "lldb/Target/ABI.h"
21 #include "lldb/Target/ExecutionContext.h"
22 #include "lldb/Target/Process.h"
23 #include "lldb/Target/RegisterContext.h"
24 #include "lldb/Target/RegisterNumber.h"
25 #include "lldb/Target/Target.h"
26 #include "lldb/Target/Thread.h"
27 #include "lldb/Target/UnwindAssembly.h"
28 
29 #include <memory>
30 
31 using namespace lldb;
32 using namespace lldb_private;
33 
// Construct the unwinders for the function starting at `addr` and covering
// `ranges`. Every m_tried_* flag starts out false: each unwind-plan source
// (eh_frame, debug_frame, compact unwind, assembly profiling, ...) is
// computed lazily on first request and cached by the Get*UnwindPlan methods.
FuncUnwinders::FuncUnwinders(UnwindTable &unwind_table, Address addr,
                             AddressRanges ranges)
    : m_unwind_table(unwind_table), m_addr(std::move(addr)),
      m_ranges(std::move(ranges)), m_tried_unwind_plan_assembly(false),
      m_tried_unwind_plan_eh_frame(false),
      m_tried_unwind_plan_object_file(false),
      m_tried_unwind_plan_debug_frame(false),
      m_tried_unwind_plan_object_file_augmented(false),
      m_tried_unwind_plan_eh_frame_augmented(false),
      m_tried_unwind_plan_debug_frame_augmented(false),
      m_tried_unwind_plan_compact_unwind(false),
      m_tried_unwind_plan_arm_unwind(false),
      m_tried_unwind_plan_symbol_file(false), m_tried_unwind_fast(false),
      m_tried_unwind_arch_default(false),
      m_tried_unwind_arch_default_at_func_entry(false),
      m_first_non_prologue_insn() {}
50 
/// Destructor: defaulted — all cached plans are shared_ptrs that release
/// themselves.

FuncUnwinders::~FuncUnwinders() = default;
54 
55 std::shared_ptr<const UnwindPlan>
56 FuncUnwinders::GetUnwindPlanAtCallSite(Target &target, Thread &thread) {
57   std::lock_guard<std::recursive_mutex> guard(m_mutex);
58 
59   if (std::shared_ptr<const UnwindPlan> plan_sp =
60           GetObjectFileUnwindPlan(target))
61     return plan_sp;
62   if (std::shared_ptr<const UnwindPlan> plan_sp =
63           GetSymbolFileUnwindPlan(thread))
64     return plan_sp;
65   if (std::shared_ptr<const UnwindPlan> plan_sp =
66           GetDebugFrameUnwindPlan(target))
67     return plan_sp;
68   if (std::shared_ptr<const UnwindPlan> plan_sp = GetEHFrameUnwindPlan(target))
69     return plan_sp;
70   if (std::shared_ptr<const UnwindPlan> plan_sp =
71           GetCompactUnwindUnwindPlan(target))
72     return plan_sp;
73   if (std::shared_ptr<const UnwindPlan> plan_sp =
74           GetArmUnwindUnwindPlan(target))
75     return plan_sp;
76 
77   return nullptr;
78 }
79 
80 std::shared_ptr<const UnwindPlan>
81 FuncUnwinders::GetCompactUnwindUnwindPlan(Target &target) {
82   std::lock_guard<std::recursive_mutex> guard(m_mutex);
83   if (m_unwind_plan_compact_unwind.size() > 0)
84     return m_unwind_plan_compact_unwind[0]; // FIXME support multiple compact
85                                             // unwind plans for one func
86   if (m_tried_unwind_plan_compact_unwind)
87     return nullptr;
88 
89   m_tried_unwind_plan_compact_unwind = true;
90   // Only continuous functions are supported.
91   if (m_ranges.size() == 1) {
92     Address current_pc(m_ranges[0].GetBaseAddress());
93     CompactUnwindInfo *compact_unwind = m_unwind_table.GetCompactUnwindInfo();
94     if (compact_unwind) {
95       auto unwind_plan_sp =
96           std::make_shared<UnwindPlan>(lldb::eRegisterKindGeneric);
97       if (compact_unwind->GetUnwindPlan(target, current_pc, *unwind_plan_sp)) {
98         m_unwind_plan_compact_unwind.push_back(unwind_plan_sp);
99         return m_unwind_plan_compact_unwind[0]; // FIXME support multiple
100                                                 // compact unwind plans for one
101                                                 // func
102       }
103     }
104   }
105   return nullptr;
106 }
107 
108 std::shared_ptr<const UnwindPlan>
109 FuncUnwinders::GetObjectFileUnwindPlan(Target &target) {
110   std::lock_guard<std::recursive_mutex> guard(m_mutex);
111   if (m_unwind_plan_object_file_sp.get() ||
112       m_tried_unwind_plan_object_file)
113     return m_unwind_plan_object_file_sp;
114 
115   m_tried_unwind_plan_object_file = true;
116   if (CallFrameInfo *object_file_frame =
117           m_unwind_table.GetObjectFileUnwindInfo())
118     m_unwind_plan_object_file_sp =
119         object_file_frame->GetUnwindPlan(m_ranges, m_addr);
120   return m_unwind_plan_object_file_sp;
121 }
122 
123 std::shared_ptr<const UnwindPlan>
124 FuncUnwinders::GetEHFrameUnwindPlan(Target &target) {
125   std::lock_guard<std::recursive_mutex> guard(m_mutex);
126   if (m_unwind_plan_eh_frame_sp.get() || m_tried_unwind_plan_eh_frame)
127     return m_unwind_plan_eh_frame_sp;
128 
129   m_tried_unwind_plan_eh_frame = true;
130   if (m_addr.IsValid()) {
131     if (DWARFCallFrameInfo *eh_frame = m_unwind_table.GetEHFrameInfo())
132       m_unwind_plan_eh_frame_sp = eh_frame->GetUnwindPlan(m_ranges, m_addr);
133   }
134   return m_unwind_plan_eh_frame_sp;
135 }
136 
137 std::shared_ptr<const UnwindPlan>
138 FuncUnwinders::GetDebugFrameUnwindPlan(Target &target) {
139   std::lock_guard<std::recursive_mutex> guard(m_mutex);
140   if (m_unwind_plan_debug_frame_sp || m_tried_unwind_plan_debug_frame)
141     return m_unwind_plan_debug_frame_sp;
142 
143   m_tried_unwind_plan_debug_frame = true;
144   if (!m_ranges.empty()) {
145     if (DWARFCallFrameInfo *debug_frame = m_unwind_table.GetDebugFrameInfo())
146       m_unwind_plan_debug_frame_sp =
147           debug_frame->GetUnwindPlan(m_ranges, m_addr);
148   }
149   return m_unwind_plan_debug_frame_sp;
150 }
151 
152 std::shared_ptr<const UnwindPlan>
153 FuncUnwinders::GetArmUnwindUnwindPlan(Target &target) {
154   std::lock_guard<std::recursive_mutex> guard(m_mutex);
155   if (m_unwind_plan_arm_unwind_sp.get() || m_tried_unwind_plan_arm_unwind)
156     return m_unwind_plan_arm_unwind_sp;
157 
158   m_tried_unwind_plan_arm_unwind = true;
159   // Only continuous functions are supported.
160   if (m_ranges.size() == 1) {
161     Address current_pc = m_ranges[0].GetBaseAddress();
162     ArmUnwindInfo *arm_unwind_info = m_unwind_table.GetArmUnwindInfo();
163     if (arm_unwind_info) {
164       auto plan_sp = std::make_shared<UnwindPlan>(lldb::eRegisterKindGeneric);
165       if (arm_unwind_info->GetUnwindPlan(target, current_pc, *plan_sp))
166         m_unwind_plan_arm_unwind_sp = std::move(plan_sp);
167     }
168   }
169   return m_unwind_plan_arm_unwind_sp;
170 }
171 
172 namespace {
173 class RegisterContextToInfo: public SymbolFile::RegisterInfoResolver {
174 public:
175   RegisterContextToInfo(RegisterContext &ctx) : m_ctx(ctx) {}
176 
177   const RegisterInfo *ResolveName(llvm::StringRef name) const override {
178     return m_ctx.GetRegisterInfoByName(name);
179   }
180   const RegisterInfo *ResolveNumber(lldb::RegisterKind kind,
181                                     uint32_t number) const override {
182     return m_ctx.GetRegisterInfo(kind, number);
183   }
184 
185 private:
186   RegisterContext &m_ctx;
187 };
188 } // namespace
189 
190 std::shared_ptr<const UnwindPlan>
191 FuncUnwinders::GetSymbolFileUnwindPlan(Thread &thread) {
192   std::lock_guard<std::recursive_mutex> guard(m_mutex);
193   if (m_unwind_plan_symbol_file_sp.get() || m_tried_unwind_plan_symbol_file)
194     return m_unwind_plan_symbol_file_sp;
195 
196   m_tried_unwind_plan_symbol_file = true;
197   if (SymbolFile *symfile = m_unwind_table.GetSymbolFile();
198       symfile && m_ranges.size() == 1) {
199     m_unwind_plan_symbol_file_sp = symfile->GetUnwindPlan(
200         m_ranges[0].GetBaseAddress(),
201         RegisterContextToInfo(*thread.GetRegisterContext()));
202   }
203   return m_unwind_plan_symbol_file_sp;
204 }
205 
206 std::shared_ptr<const UnwindPlan>
207 FuncUnwinders::GetObjectFileAugmentedUnwindPlan(Target &target,
208                                                 Thread &thread) {
209   std::lock_guard<std::recursive_mutex> guard(m_mutex);
210   if (m_unwind_plan_object_file_augmented_sp.get() ||
211       m_tried_unwind_plan_object_file_augmented)
212     return m_unwind_plan_object_file_augmented_sp;
213 
214   m_tried_unwind_plan_object_file_augmented = true;
215 
216   std::shared_ptr<const UnwindPlan> object_file_unwind_plan =
217       GetObjectFileUnwindPlan(target);
218   if (!object_file_unwind_plan)
219     return m_unwind_plan_object_file_augmented_sp;
220 
221   // Augment the instructions with epilogue descriptions if necessary
222   // so the UnwindPlan can be used at any instruction in the function.
223 
224   UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
225   // Only continuous functions are supported.
226   if (assembly_profiler_sp && m_ranges.size() == 1) {
227     auto plan_sp = std::make_shared<UnwindPlan>(*object_file_unwind_plan);
228 
229     if (assembly_profiler_sp->AugmentUnwindPlanFromCallSite(m_ranges[0], thread,
230                                                             *plan_sp))
231       m_unwind_plan_object_file_augmented_sp = std::move(plan_sp);
232   }
233   return m_unwind_plan_object_file_augmented_sp;
234 }
235 
236 std::shared_ptr<const UnwindPlan>
237 FuncUnwinders::GetEHFrameAugmentedUnwindPlan(Target &target, Thread &thread) {
238   std::lock_guard<std::recursive_mutex> guard(m_mutex);
239   if (m_unwind_plan_eh_frame_augmented_sp.get() ||
240       m_tried_unwind_plan_eh_frame_augmented)
241     return m_unwind_plan_eh_frame_augmented_sp;
242 
243   // Only supported on x86 architectures where we get eh_frame from the
244   // compiler that describes the prologue instructions perfectly, and sometimes
245   // the epilogue instructions too.
246   if (target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_32_i386 &&
247       target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_64_x86_64 &&
248       target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_64_x86_64h) {
249     m_tried_unwind_plan_eh_frame_augmented = true;
250     return m_unwind_plan_eh_frame_augmented_sp;
251   }
252 
253   m_tried_unwind_plan_eh_frame_augmented = true;
254 
255   std::shared_ptr<const UnwindPlan> eh_frame_plan =
256       GetEHFrameUnwindPlan(target);
257   if (!eh_frame_plan)
258     return m_unwind_plan_eh_frame_augmented_sp;
259 
260   // Augment the eh_frame instructions with epilogue descriptions if necessary
261   // so the UnwindPlan can be used at any instruction in the function.
262 
263   UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
264   // Only continuous functions are supported.
265   if (assembly_profiler_sp && m_ranges.size() == 1) {
266     auto plan_sp = std::make_shared<UnwindPlan>(*eh_frame_plan);
267     if (assembly_profiler_sp->AugmentUnwindPlanFromCallSite(m_ranges[0], thread,
268                                                             *plan_sp))
269       m_unwind_plan_eh_frame_augmented_sp = std::move(plan_sp);
270   }
271   return m_unwind_plan_eh_frame_augmented_sp;
272 }
273 
274 std::shared_ptr<const UnwindPlan>
275 FuncUnwinders::GetDebugFrameAugmentedUnwindPlan(Target &target,
276                                                 Thread &thread) {
277   std::lock_guard<std::recursive_mutex> guard(m_mutex);
278   if (m_unwind_plan_debug_frame_augmented_sp.get() ||
279       m_tried_unwind_plan_debug_frame_augmented)
280     return m_unwind_plan_debug_frame_augmented_sp;
281 
282   // Only supported on x86 architectures where we get debug_frame from the
283   // compiler that describes the prologue instructions perfectly, and sometimes
284   // the epilogue instructions too.
285   if (target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_32_i386 &&
286       target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_64_x86_64 &&
287       target.GetArchitecture().GetCore() != ArchSpec::eCore_x86_64_x86_64h) {
288     m_tried_unwind_plan_debug_frame_augmented = true;
289     return m_unwind_plan_debug_frame_augmented_sp;
290   }
291 
292   m_tried_unwind_plan_debug_frame_augmented = true;
293 
294   std::shared_ptr<const UnwindPlan> debug_frame_plan =
295       GetDebugFrameUnwindPlan(target);
296   if (!debug_frame_plan)
297     return m_unwind_plan_debug_frame_augmented_sp;
298 
299   // Augment the debug_frame instructions with epilogue descriptions if
300   // necessary so the UnwindPlan can be used at any instruction in the
301   // function.
302 
303   UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
304   // Only continuous functions are supported.
305   if (assembly_profiler_sp && m_ranges.size() == 1) {
306     auto plan_sp = std::make_shared<UnwindPlan>(*debug_frame_plan);
307 
308     if (assembly_profiler_sp->AugmentUnwindPlanFromCallSite(m_ranges[0], thread,
309                                                             *plan_sp))
310       m_unwind_plan_debug_frame_augmented_sp = std::move(plan_sp);
311   }
312   return m_unwind_plan_debug_frame_augmented_sp;
313 }
314 
315 std::shared_ptr<const UnwindPlan>
316 FuncUnwinders::GetAssemblyUnwindPlan(Target &target, Thread &thread) {
317   std::lock_guard<std::recursive_mutex> guard(m_mutex);
318   if (m_unwind_plan_assembly_sp.get() || m_tried_unwind_plan_assembly ||
319       !m_unwind_table.GetAllowAssemblyEmulationUnwindPlans()) {
320     return m_unwind_plan_assembly_sp;
321   }
322 
323   m_tried_unwind_plan_assembly = true;
324 
325   UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
326   // Only continuous functions are supported.
327   if (assembly_profiler_sp && m_ranges.size() == 1) {
328     // Don't analyze more than 10 megabytes of instructions,
329     // if a function is legitimately larger than that, we'll
330     // miss the epilogue instructions, but guard against a
331     // bogusly large function and analyzing large amounts of
332     // non-instruction data.
333     AddressRange range = m_ranges[0];
334     const addr_t func_size =
335         std::min(range.GetByteSize(), (addr_t)1024 * 10 * 10);
336     range.SetByteSize(func_size);
337 
338     auto plan_sp = std::make_shared<UnwindPlan>(lldb::eRegisterKindGeneric);
339     if (assembly_profiler_sp->GetNonCallSiteUnwindPlanFromAssembly(
340             range, thread, *plan_sp))
341       m_unwind_plan_assembly_sp = std::move(plan_sp);
342   }
343   return m_unwind_plan_assembly_sp;
344 }
345 
346 // This method compares the pc unwind rule in the first row of two UnwindPlans.
347 // If they have the same way of getting the pc value (e.g. "CFA - 8" + "CFA is
348 // sp"), then it will return LazyBoolTrue.
349 LazyBool FuncUnwinders::CompareUnwindPlansForIdenticalInitialPCLocation(
350     Thread &thread, const std::shared_ptr<const UnwindPlan> &a,
351     const std::shared_ptr<const UnwindPlan> &b) {
352   if (!a || !b)
353     return eLazyBoolCalculate;
354 
355   const UnwindPlan::Row *a_first_row = a->GetRowAtIndex(0);
356   const UnwindPlan::Row *b_first_row = b->GetRowAtIndex(0);
357   if (!a_first_row || !b_first_row)
358     return eLazyBoolCalculate;
359 
360   RegisterNumber pc_reg(thread, eRegisterKindGeneric, LLDB_REGNUM_GENERIC_PC);
361   uint32_t a_pc_regnum = pc_reg.GetAsKind(a->GetRegisterKind());
362   uint32_t b_pc_regnum = pc_reg.GetAsKind(b->GetRegisterKind());
363 
364   UnwindPlan::Row::AbstractRegisterLocation a_pc_regloc;
365   UnwindPlan::Row::AbstractRegisterLocation b_pc_regloc;
366 
367   a_first_row->GetRegisterInfo(a_pc_regnum, a_pc_regloc);
368   b_first_row->GetRegisterInfo(b_pc_regnum, b_pc_regloc);
369 
370   if (a_first_row->GetCFAValue() != b_first_row->GetCFAValue())
371     return eLazyBoolNo;
372   if (a_pc_regloc != b_pc_regloc)
373     return eLazyBoolNo;
374 
375   return eLazyBoolYes;
376 }
377 
378 std::shared_ptr<const UnwindPlan>
379 FuncUnwinders::GetUnwindPlanAtNonCallSite(Target &target, Thread &thread) {
380   std::shared_ptr<const UnwindPlan> eh_frame_sp = GetEHFrameUnwindPlan(target);
381   if (!eh_frame_sp)
382     eh_frame_sp = GetDebugFrameUnwindPlan(target);
383   if (!eh_frame_sp)
384     eh_frame_sp = GetObjectFileUnwindPlan(target);
385   std::shared_ptr<const UnwindPlan> arch_default_at_entry_sp =
386       GetUnwindPlanArchitectureDefaultAtFunctionEntry(thread);
387   std::shared_ptr<const UnwindPlan> arch_default_sp =
388       GetUnwindPlanArchitectureDefault(thread);
389   std::shared_ptr<const UnwindPlan> assembly_sp =
390       GetAssemblyUnwindPlan(target, thread);
391 
392   // This point of this code is to detect when a function is using a non-
393   // standard ABI, and the eh_frame correctly describes that alternate ABI.
394   // This is addressing a specific situation on x86_64 linux systems where one
395   // function in a library pushes a value on the stack and jumps to another
396   // function.  So using an assembly instruction based unwind will not work
397   // when you're in the second function - the stack has been modified in a non-
398   // ABI way.  But we have eh_frame that correctly describes how to unwind from
399   // this location.  So we're looking to see if the initial pc register save
400   // location from the eh_frame is different from the assembly unwind, the arch
401   // default unwind, and the arch default at initial function entry.
402   //
403   // We may have eh_frame that describes the entire function -- or we may have
404   // eh_frame that only describes the unwind after the prologue has executed --
405   // so we need to check both the arch default (once the prologue has executed)
406   // and the arch default at initial function entry.  And we may be running on
407   // a target where we have only some of the assembly/arch default unwind plans
408   // available.
409 
410   if (CompareUnwindPlansForIdenticalInitialPCLocation(
411           thread, eh_frame_sp, arch_default_at_entry_sp) == eLazyBoolNo &&
412       CompareUnwindPlansForIdenticalInitialPCLocation(
413           thread, eh_frame_sp, arch_default_sp) == eLazyBoolNo &&
414       CompareUnwindPlansForIdenticalInitialPCLocation(
415           thread, assembly_sp, arch_default_sp) == eLazyBoolNo) {
416     return eh_frame_sp;
417   }
418 
419   if (std::shared_ptr<const UnwindPlan> plan_sp =
420           GetSymbolFileUnwindPlan(thread))
421     return plan_sp;
422   if (std::shared_ptr<const UnwindPlan> plan_sp =
423           GetDebugFrameAugmentedUnwindPlan(target, thread))
424     return plan_sp;
425   if (std::shared_ptr<const UnwindPlan> plan_sp =
426           GetEHFrameAugmentedUnwindPlan(target, thread))
427     return plan_sp;
428   if (std::shared_ptr<const UnwindPlan> plan_sp =
429           GetObjectFileAugmentedUnwindPlan(target, thread))
430     return plan_sp;
431 
432   return assembly_sp;
433 }
434 
435 std::shared_ptr<const UnwindPlan>
436 FuncUnwinders::GetUnwindPlanFastUnwind(Target &target, Thread &thread) {
437   std::lock_guard<std::recursive_mutex> guard(m_mutex);
438   if (m_unwind_plan_fast_sp.get() || m_tried_unwind_fast)
439     return m_unwind_plan_fast_sp;
440 
441   m_tried_unwind_fast = true;
442 
443   UnwindAssemblySP assembly_profiler_sp(GetUnwindAssemblyProfiler(target));
444   if (assembly_profiler_sp && m_ranges.size() == 1) {
445     auto plan_sp = std::make_shared<UnwindPlan>(lldb::eRegisterKindGeneric);
446     if (assembly_profiler_sp->GetFastUnwindPlan(m_ranges[0], thread, *plan_sp))
447       m_unwind_plan_fast_sp = std::move(plan_sp);
448   }
449   return m_unwind_plan_fast_sp;
450 }
451 
452 std::shared_ptr<const UnwindPlan>
453 FuncUnwinders::GetUnwindPlanArchitectureDefault(Thread &thread) {
454   std::lock_guard<std::recursive_mutex> guard(m_mutex);
455   if (m_unwind_plan_arch_default_sp.get() || m_tried_unwind_arch_default)
456     return m_unwind_plan_arch_default_sp;
457 
458   m_tried_unwind_arch_default = true;
459 
460   ProcessSP process_sp(thread.CalculateProcess());
461   if (process_sp) {
462     if (ABI *abi = process_sp->GetABI().get())
463       m_unwind_plan_arch_default_sp = abi->CreateDefaultUnwindPlan();
464   }
465 
466   return m_unwind_plan_arch_default_sp;
467 }
468 
469 std::shared_ptr<const UnwindPlan>
470 FuncUnwinders::GetUnwindPlanArchitectureDefaultAtFunctionEntry(Thread &thread) {
471   std::lock_guard<std::recursive_mutex> guard(m_mutex);
472   if (m_unwind_plan_arch_default_at_func_entry_sp.get() ||
473       m_tried_unwind_arch_default_at_func_entry)
474     return m_unwind_plan_arch_default_at_func_entry_sp;
475 
476   m_tried_unwind_arch_default_at_func_entry = true;
477 
478   Address current_pc;
479   ProcessSP process_sp(thread.CalculateProcess());
480   if (process_sp) {
481     if (ABI *abi = process_sp->GetABI().get()) {
482       m_unwind_plan_arch_default_at_func_entry_sp =
483           abi->CreateFunctionEntryUnwindPlan();
484     }
485   }
486 
487   return m_unwind_plan_arch_default_at_func_entry_sp;
488 }
489 
490 const Address &FuncUnwinders::GetFunctionStartAddress() const { return m_addr; }
491 
492 lldb::UnwindAssemblySP
493 FuncUnwinders::GetUnwindAssemblyProfiler(Target &target) {
494   UnwindAssemblySP assembly_profiler_sp;
495   if (ArchSpec arch = m_unwind_table.GetArchitecture()) {
496     arch.MergeFrom(target.GetArchitecture());
497     assembly_profiler_sp = UnwindAssembly::FindPlugin(arch);
498   }
499   return assembly_profiler_sp;
500 }
501