//=== aarch64.h - Generic JITLink aarch64 edge kinds, utilities -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic utilities for graphs representing aarch64 objects.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
#define LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H

#include "TableManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"

namespace llvm {
namespace jitlink {
namespace aarch64 {

/// Represents aarch64 fixups and other aarch64-specific edge kinds.
enum EdgeKind_aarch64 : Edge::Kind {

  /// A plain 64-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint64
  ///
  Pointer64 = Edge::FirstRelocation,

  /// A plain 32-bit pointer value relocation.
  ///
  /// Fixup expression:
  ///   Fixup <- Target + Addend : uint32
  ///
  /// Errors:
  ///   - The target must reside in the low 32-bits of the address space,
  ///     otherwise an out-of-range error will be returned.
  ///
  Pointer32,

  /// A 64-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int64
  ///
  Delta64,

  /// A 32-bit delta.
  ///
  /// Delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///   Fixup <- Target - Fixup + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  ///
  Delta32,
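
  // Worked example (illustrative values, not from any particular object): for
  // a Delta32 edge at FixupAddress 0x2000 with Target 0x1f00 and Addend 0,
  //   Value = 0x1f00 - 0x2000 + 0 = -0x100,
  // which fits in an int32 and is stored little-endian at the fixup location.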

  /// A 64-bit negative delta.
  ///
  /// Delta from the target back to the fixup.
  ///
  /// Fixup expression:
  ///   Fixup <- Fixup - Target + Addend : int64
  ///
  NegDelta64,

  /// A 32-bit negative delta.
  ///
  /// Delta from the target back to the fixup.
  ///
  /// Fixup expression:
  ///   Fixup <- Fixup - Target + Addend : int32
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int32, otherwise
  ///     an out-of-range error will be returned.
  NegDelta32,

  /// A 26-bit PC-relative branch.
  ///
  /// Represents a PC-relative call or branch to a target within +/-128Mb. The
  /// target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int26
  ///
  /// Notes:
  ///   The '26' in the name refers to the number of operand bits and follows
  ///   the naming convention used by the corresponding ELF and MachO
  ///   relocations. Since the low two bits must be zero (because of the
  ///   32-bit alignment of the target) the operand is effectively a signed
  ///   28-bit number.
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned, otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int26, otherwise
  ///     an out-of-range error will be returned.
  Branch26PCRel,
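
  // Worked example (illustrative addresses): a B/BL at FixupAddress 0x1000
  // targeting 0x2000 with Addend 0 gives
  //   Value = 0x2000 - 0x1000 + 0 = 0x1000, Imm26 = 0x1000 >> 2 = 0x400,
  // so a raw 0x14000000 (B) instruction is patched to 0x14000400.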

  /// A 14-bit PC-relative test and branch.
  ///
  /// Represents a PC-relative test and branch to a target within +/-32Kb. The
  /// target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int14
  ///
  /// Notes:
  ///   The '14' in the name refers to the number of operand bits and follows
  ///   the naming convention used by the corresponding ELF relocation. Since
  ///   the low two bits must be zero (because of the 32-bit alignment of the
  ///   target) the operand is effectively a signed 16-bit number.
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned, otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int14, otherwise
  ///     an out-of-range error will be returned.
  TestAndBranch14PCRel,

  /// A 19-bit PC-relative conditional branch.
  ///
  /// Represents a PC-relative conditional branch to a target within +/-1Mb.
  /// The target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int19
  ///
  /// Notes:
  ///   The '19' in the name refers to the number of operand bits and follows
  ///   the naming convention used by the corresponding ELF relocation. Since
  ///   the low two bits must be zero (because of the 32-bit alignment of the
  ///   target) the operand is effectively a signed 21-bit number.
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned, otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int19, otherwise
  ///     an out-of-range error will be returned.
  CondBranch19PCRel,

  /// A 16-bit slice of the target address (which slice depends on the
  /// instruction at the fixup location).
  ///
  /// Used to fix up MOVK/MOVN/MOVZ instructions.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- (Target + Addend) >> Shift : uint16
  ///
  /// where Shift is encoded in the instruction at the fixup location.
  ///
  MoveWide16,
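
  // Worked example (illustrative values): materializing the 64-bit address
  // 0x0000123456780000 with a MOVK whose hw field selects Shift = 16 takes
  //   Imm16 = (0x123456780000 >> 16) & 0xffff = 0x5678.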

  /// The signed 21-bit delta from the fixup to the target.
  ///
  /// Typically used to load a pointer at a PC-relative offset of +/-1Mb. The
  /// target must be 32-bit aligned.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- (Target - Fixup + Addend) >> 2 : int19
  ///
  /// Notes:
  ///   The '19' in the name refers to the number of operand bits and follows
  ///   the naming convention used by the corresponding ELF relocation. Since
  ///   the low two bits must be zero (because of the 32-bit alignment of the
  ///   target) the operand is effectively a signed 21-bit number.
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     32-bit aligned, otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into an int19, otherwise
  ///     an out-of-range error will be returned.
  LDRLiteral19,
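
  // Worked example (illustrative addresses): an LDR (literal) at FixupAddress
  // 0x1000 loading from 0x1400 gives Delta = 0x400 and an encoded imm19 of
  //   (0x400 >> 2) & 0x7ffff = 0x100.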

  /// The signed 21-bit delta from the fixup to the target.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- Target - Fixup + Addend : int21
  ///
  /// Notes:
  ///   For ADR fixups.
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int21, otherwise
  ///     an out-of-range error will be returned.
  ADRLiteral21,

  /// The signed 21-bit delta from the fixup page to the page containing the
  /// target.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- (((Target + Addend) & ~0xfff) - (Fixup & ~0xfff)) >> 12 : int21
  ///
  /// Notes:
  ///   For ADRP fixups.
  ///
  /// Errors:
  ///   - The result of the fixup expression must fit into an int21, otherwise
  ///     an out-of-range error will be returned.
  Page21,
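
  // Worked example (illustrative addresses): an ADRP at FixupAddress 0x10ffc
  // with Target + Addend = 0x25030 gives
  //   Fixup <- (0x25000 - 0x10000) >> 12 = 0x15 (21 pages forward).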

  /// The 12-bit (potentially shifted) offset of the target within its page.
  ///
  /// Typically used to fix up LDR immediates.
  ///
  /// Fixup expression:
  ///
  ///   Fixup <- ((Target + Addend) >> Shift) & 0xfff : uint12
  ///
  /// where Shift is encoded in the size field of the instruction.
  ///
  /// Errors:
  ///   - The result of the unshifted part of the fixup expression must be
  ///     aligned, otherwise an alignment error will be returned.
  ///   - The result of the fixup expression must fit into a uint12, otherwise
  ///     an out-of-range error will be returned.
  PageOffset12,
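
  // Worked example (illustrative values): a 64-bit LDR has Shift = 3 (see
  // getPageOffset12Shift below), so a target at page offset 0x468 (8-byte
  // aligned) encodes imm12 = 0x468 >> 3 = 0x8d.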

  /// A GOT entry getter/constructor, transformed to Page21 pointing at the
  /// GOT entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Page21 targeting
  /// the GOT entry for the edge's current target, maintaining the same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToPage21,

  /// A GOT entry getter/constructor, transformed to PageOffset12 pointing at
  /// the GOT entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a PageOffset12
  /// targeting the GOT entry for the edge's current target, maintaining the
  /// same addend. A GOT entry for the target should be created if one does not
  /// already exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToPageOffset12,

  /// A GOT entry getter/constructor, transformed to Delta32 pointing at the
  /// GOT entry for the original target.
  ///
  /// Indicates that this edge should be transformed into a Delta32 targeting
  /// the GOT entry for the edge's current target, maintaining the same addend.
  /// A GOT entry for the target should be created if one does not already
  /// exist.
  ///
  /// Edges of this kind are usually handled by a GOT builder pass inserted by
  /// default.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestGOTAndTransformToDelta32,

  /// A TLVP entry getter/constructor, transformed to Page21.
  ///
  /// Indicates that this edge should be transformed into a Page21 targeting
  /// the TLVP entry for the edge's current target. A TLVP entry for the target
  /// should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLVPAndTransformToPage21,

  /// A TLVP entry getter/constructor, transformed to PageOffset12.
  ///
  /// Indicates that this edge should be transformed into a PageOffset12
  /// targeting the TLVP entry for the edge's current target. A TLVP entry for
  /// the target should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLVPAndTransformToPageOffset12,

  /// A TLSDesc entry getter/constructor, transformed to Page21.
  ///
  /// Indicates that this edge should be transformed into a Page21 targeting
  /// the TLSDesc entry for the edge's current target. A TLSDesc entry for the
  /// target should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLSDescEntryAndTransformToPage21,

  /// A TLSDesc entry getter/constructor, transformed to PageOffset12.
  ///
  /// Indicates that this edge should be transformed into a PageOffset12
  /// targeting the TLSDesc entry for the edge's current target. A TLSDesc
  /// entry for the target should be created if one does not already exist.
  ///
  /// Fixup expression:
  ///   NONE
  ///
  /// Errors:
  ///   - *ASSERTION* Failure to handle edges of this kind prior to the fixup
  ///     phase will result in an assert/unreachable during the fixup phase.
  ///
  RequestTLSDescEntryAndTransformToPageOffset12,
};

/// Returns a string name for the given aarch64 edge. For debugging purposes
/// only.
const char *getEdgeKindName(Edge::Kind K);

// Returns whether the Instr is an LD/ST (imm12).
inline bool isLoadStoreImm12(uint32_t Instr) {
  constexpr uint32_t LoadStoreImm12Mask = 0x3b000000;
  return (Instr & LoadStoreImm12Mask) == 0x39000000;
}

inline bool isTestAndBranchImm14(uint32_t Instr) {
  constexpr uint32_t TestAndBranchImm14Mask = 0x7e000000;
  return (Instr & TestAndBranchImm14Mask) == 0x36000000;
}

inline bool isCondBranchImm19(uint32_t Instr) {
  constexpr uint32_t CondBranchImm19Mask = 0xfe000000;
  return (Instr & CondBranchImm19Mask) == 0x54000000;
}

inline bool isCompAndBranchImm19(uint32_t Instr) {
  constexpr uint32_t CompAndBranchImm19Mask = 0x7e000000;
  return (Instr & CompAndBranchImm19Mask) == 0x34000000;
}

inline bool isADR(uint32_t Instr) {
  constexpr uint32_t ADRMask = 0x9f000000;
  return (Instr & ADRMask) == 0x10000000;
}

inline bool isLDRLiteral(uint32_t Instr) {
  constexpr uint32_t LDRLitMask = 0x3b000000;
  return (Instr & LDRLitMask) == 0x18000000;
}

// Returns the amount the address operand of an LD/ST (imm12)
// should be shifted right by.
//
// The shift value varies with the data size of the LD/ST instruction.
// For instance, an LDRH instruction needs the address to be shifted
// right by 1.
inline unsigned getPageOffset12Shift(uint32_t Instr) {
  constexpr uint32_t Vec128Mask = 0x04800000;

  if (isLoadStoreImm12(Instr)) {
    uint32_t ImplicitShift = Instr >> 30;
    if (ImplicitShift == 0)
      if ((Instr & Vec128Mask) == Vec128Mask)
        ImplicitShift = 4;

    return ImplicitShift;
  }

  return 0;
}
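
// Illustrative example (hand-worked encoding, given only for explanation):
// "ldr x1, [x2, #16]" encodes as 0xf9400841. Bits 31:30 are 0b11, so
// getPageOffset12Shift returns 3 and the imm12 field counts 8-byte units.
// Q-register (128-bit) loads/stores have size bits 0b00 with the Vec128Mask
// bits set, and return 4 (16-byte units).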

// Returns whether the Instr is a MOVK/MOVZ (imm16) with a zero immediate
// field.
inline bool isMoveWideImm16(uint32_t Instr) {
  constexpr uint32_t MoveWideImm16Mask = 0x5f9fffe0;
  return (Instr & MoveWideImm16Mask) == 0x52800000;
}

// Returns the amount the address operand of a MOVK/MOVZ (imm16)
// should be shifted right by.
//
// The shift value is specified in the assembly as LSL #<shift>.
inline unsigned getMoveWide16Shift(uint32_t Instr) {
  if (isMoveWideImm16(Instr)) {
    uint32_t ImplicitShift = (Instr >> 21) & 0b11;
    return ImplicitShift << 4;
  }

  return 0;
}
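
// Illustrative example: "movk x0, #0x1234, lsl #32" has hw (bits 22:21) equal
// to 0b10, so getMoveWide16Shift returns 2 << 4 = 32 and the MoveWide16 fixup
// above takes bits [47:32] of the target address.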

/// Apply fixup expression for edge to block content.
inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
  using namespace support;

  char *BlockWorkingMem = B.getAlreadyMutableContent().data();
  char *FixupPtr = BlockWorkingMem + E.getOffset();
  orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();

  switch (E.getKind()) {
  case Pointer64: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    *(ulittle64_t *)FixupPtr = Value;
    break;
  }
  case Pointer32: {
    uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
    if (Value > std::numeric_limits<uint32_t>::max())
      return makeTargetOutOfRangeError(G, B, E);
    *(ulittle32_t *)FixupPtr = Value;
    break;
  }
  case Delta32:
  case Delta64:
  case NegDelta32:
  case NegDelta64: {
    int64_t Value;
    if (E.getKind() == Delta32 || E.getKind() == Delta64)
      Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
    else
      Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();

    if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
      if (Value < std::numeric_limits<int32_t>::min() ||
          Value > std::numeric_limits<int32_t>::max())
        return makeTargetOutOfRangeError(G, B, E);
      *(little32_t *)FixupPtr = Value;
    } else
      *(little64_t *)FixupPtr = Value;
    break;
  }
  case Branch26PCRel: {
    assert((FixupAddress.getValue() & 0x3) == 0 &&
           "Branch-inst is not 32-bit aligned");

    int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();

    if (static_cast<uint64_t>(Value) & 0x3)
      return make_error<JITLinkError>("BranchPCRel26 target is not 32-bit "
                                      "aligned");

    if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
      return makeTargetOutOfRangeError(G, B, E);

    uint32_t RawInstr = *(little32_t *)FixupPtr;
    assert((RawInstr & 0x7fffffff) == 0x14000000 &&
           "RawInstr isn't a B or BL immediate instruction");
    uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
    uint32_t FixedInstr = RawInstr | Imm;
    *(little32_t *)FixupPtr = FixedInstr;
    break;
  }
  case MoveWide16: {
    uint64_t TargetOffset =
        (E.getTarget().getAddress() + E.getAddend()).getValue();

    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(isMoveWideImm16(RawInstr) &&
           "RawInstr isn't a MOVK/MOVZ instruction");

    unsigned ImmShift = getMoveWide16Shift(RawInstr);
    uint32_t Imm = (TargetOffset >> ImmShift) & 0xffff;
    uint32_t FixedInstr = RawInstr | (Imm << 5);
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case LDRLiteral19: {
    assert((FixupAddress.getValue() & 0x3) == 0 && "LDR is not 32-bit aligned");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(isLDRLiteral(RawInstr) && "RawInstr is not an LDR Literal");
    int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
    if (Delta & 0x3)
      return make_error<JITLinkError>("LDR literal target is not 32-bit "
                                      "aligned");
    if (!isInt<21>(Delta))
      return makeTargetOutOfRangeError(G, B, E);
    uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case ADRLiteral21: {
    assert((FixupAddress.getValue() & 0x3) == 0 && "ADR is not 32-bit aligned");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(isADR(RawInstr) && "RawInstr is not an ADR");
    int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
    if (!isInt<21>(Delta))
      return makeTargetOutOfRangeError(G, B, E);
    auto UDelta = static_cast<uint32_t>(Delta);
    uint32_t EncodedImmHi = ((UDelta >> 2) & 0x7ffff) << 5;
    uint32_t EncodedImmLo = (UDelta & 0x3) << 29;
    uint32_t FixedInstr = RawInstr | EncodedImmHi | EncodedImmLo;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case TestAndBranch14PCRel: {
    assert((FixupAddress.getValue() & 0x3) == 0 &&
           "Test and branch is not 32-bit aligned");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert(isTestAndBranchImm14(RawInstr) &&
           "RawInstr is not a test and branch");
    int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
    if (Delta & 0x3)
      return make_error<JITLinkError>(
          "Test and branch literal target is not 32-bit aligned");
    if (!isInt<16>(Delta))
      return makeTargetOutOfRangeError(G, B, E);
    uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x3fff) << 5;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case CondBranch19PCRel: {
    assert((FixupAddress.getValue() & 0x3) == 0 &&
           "Conditional branch is not 32-bit aligned");
    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert((isCondBranchImm19(RawInstr) || isCompAndBranchImm19(RawInstr)) &&
           "RawInstr is not a conditional branch");
    int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
    if (Delta & 0x3)
      return make_error<JITLinkError>(
          "Conditional branch literal target is not 32-bit aligned");
    if (!isInt<21>(Delta))
      return makeTargetOutOfRangeError(G, B, E);
    uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case Page21: {
    uint64_t TargetPage =
        (E.getTarget().getAddress().getValue() + E.getAddend()) &
        ~static_cast<uint64_t>(4096 - 1);
    uint64_t PCPage =
        FixupAddress.getValue() & ~static_cast<uint64_t>(4096 - 1);

    int64_t PageDelta = TargetPage - PCPage;
    if (!isInt<33>(PageDelta))
      return makeTargetOutOfRangeError(G, B, E);

    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    assert((RawInstr & 0xffffffe0) == 0x90000000 &&
           "RawInstr isn't an ADRP instruction");
    uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
    uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
    uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  case PageOffset12: {
    uint64_t TargetOffset =
        (E.getTarget().getAddress() + E.getAddend()).getValue() & 0xfff;

    uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
    unsigned ImmShift = getPageOffset12Shift(RawInstr);

    if (TargetOffset & ((1 << ImmShift) - 1))
      return make_error<JITLinkError>("PAGEOFF12 target is not aligned");

    uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
    uint32_t FixedInstr = RawInstr | EncodedImm;
    *(ulittle32_t *)FixupPtr = FixedInstr;
    break;
  }
  default:
    return make_error<JITLinkError>(
        "In graph " + G.getName() + ", section " + B.getSection().getName() +
        " unsupported edge kind " + getEdgeKindName(E.getKind()));
  }

  return Error::success();
}
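
// Minimal sketch (not part of this header's API) of the loop shape that
// drives applyFixup; in-tree this is performed by the generic JITLink linker
// during its fix-up phase, after each block's content has been made mutable:
//
//   for (auto *B : G.blocks())
//     for (auto &E : B->edges())
//       if (auto Err = applyFixup(G, *B, E))
//         return Err;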

/// aarch64 pointer size.
constexpr uint64_t PointerSize = 8;

/// AArch64 null pointer content.
extern const char NullPointerContent[PointerSize];

/// AArch64 pointer jump stub content.
///
/// Contains the instruction sequence for an indirect jump via an in-memory
/// pointer:
///   ADRP x16, ptr@page21
///   LDR  x16, [x16, ptr@pageoff12]
///   BR   x16
extern const char PointerJumpStubContent[12];

/// Creates a new pointer block in the given section and returns an
/// anonymous symbol pointing to it.
///
/// If InitialTarget is given then a Pointer64 relocation will be added to the
/// block, pointing at InitialTarget.
///
/// The pointer block will have the following default values:
///   alignment: 64-bit
///   alignment-offset: 0
///   address: highest allowable (~7U)
inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
                                      Symbol *InitialTarget = nullptr,
                                      uint64_t InitialAddend = 0) {
  auto &B = G.createContentBlock(PointerSection, NullPointerContent,
                                 orc::ExecutorAddr(~uint64_t(7)), 8, 0);
  if (InitialTarget)
    B.addEdge(Pointer64, 0, *InitialTarget, InitialAddend);
  return G.addAnonymousSymbol(B, 0, 8, false, false);
}

/// Create a jump stub block that jumps via the pointer at the given symbol.
///
/// The stub block will have the following default values:
///   alignment: 32-bit
///   alignment-offset: 0
///   address: highest allowable (~11U)
inline Block &createPointerJumpStubBlock(LinkGraph &G, Section &StubSection,
                                         Symbol &PointerSymbol) {
  auto &B = G.createContentBlock(StubSection, PointerJumpStubContent,
                                 orc::ExecutorAddr(~uint64_t(11)), 4, 0);
  B.addEdge(Page21, 0, PointerSymbol, 0);
  B.addEdge(PageOffset12, 4, PointerSymbol, 0);
  return B;
}

/// Create a jump stub that jumps via the pointer at the given symbol and
/// an anonymous symbol pointing to it. Return the anonymous symbol.
///
/// The stub block will be created by createPointerJumpStubBlock.
inline Symbol &createAnonymousPointerJumpStub(LinkGraph &G,
                                              Section &StubSection,
                                              Symbol &PointerSymbol) {
  return G.addAnonymousSymbol(
      createPointerJumpStubBlock(G, StubSection, PointerSymbol), 0,
      sizeof(PointerJumpStubContent), true, false);
}

/// Global Offset Table Builder.
class GOTTableManager : public TableManager<GOTTableManager> {
public:
  static StringRef getSectionName() { return "$__GOT"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    Edge::Kind KindToSet = Edge::Invalid;
    const char *BlockWorkingMem = B->getContent().data();
    const char *FixupPtr = BlockWorkingMem + E.getOffset();

    switch (E.getKind()) {
    case aarch64::RequestGOTAndTransformToPage21:
    case aarch64::RequestTLVPAndTransformToPage21: {
      KindToSet = aarch64::Page21;
      break;
    }
    case aarch64::RequestGOTAndTransformToPageOffset12:
    case aarch64::RequestTLVPAndTransformToPageOffset12: {
      KindToSet = aarch64::PageOffset12;
      uint32_t RawInstr = *(const support::ulittle32_t *)FixupPtr;
      (void)RawInstr;
      assert(E.getAddend() == 0 &&
             "GOTPageOffset12/TLVPageOffset12 with non-zero addend");
      assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
             "RawInstr isn't a 64-bit LDR immediate");
      break;
    }
    case aarch64::RequestGOTAndTransformToDelta32: {
      KindToSet = aarch64::Delta32;
      break;
    }
    default:
      return false;
    }
    assert(KindToSet != Edge::Invalid &&
           "Fell through switch, but no new kind to set");
    DEBUG_WITH_TYPE("jitlink", {
      dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
             << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
             << formatv("{0:x}", E.getOffset()) << ")\n";
    });
    E.setKind(KindToSet);
    E.setTarget(getEntryForTarget(G, E.getTarget()));
    return true;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointer(G, getGOTSection(G), &Target);
  }

private:
  Section &getGOTSection(LinkGraph &G) {
    if (!GOTSection)
      GOTSection = &G.createSection(getSectionName(),
                                    orc::MemProt::Read | orc::MemProt::Exec);
    return *GOTSection;
  }

  Section *GOTSection = nullptr;
};

/// Procedure Linkage Table Builder.
class PLTTableManager : public TableManager<PLTTableManager> {
public:
  PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {}

  static StringRef getSectionName() { return "$__STUBS"; }

  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
    if (E.getKind() == aarch64::Branch26PCRel && !E.getTarget().isDefined()) {
      DEBUG_WITH_TYPE("jitlink", {
        dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
               << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
               << formatv("{0:x}", E.getOffset()) << ")\n";
      });
      E.setTarget(getEntryForTarget(G, E.getTarget()));
      return true;
    }
    return false;
  }

  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
    return createAnonymousPointerJumpStub(G, getStubsSection(G),
                                          GOT.getEntryForTarget(G, Target));
  }

public:
  Section &getStubsSection(LinkGraph &G) {
    if (!StubsSection)
      StubsSection = &G.createSection(getSectionName(),
                                      orc::MemProt::Read | orc::MemProt::Exec);
    return *StubsSection;
  }

  GOTTableManager &GOT;
  Section *StubsSection = nullptr;
};
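
// Minimal usage sketch (mirrors what the aarch64 ELF/MachO JITLink backends
// typically do; assumes visitExistingEdges is available from the included
// JITLink headers):
//
//   static Error buildTables(LinkGraph &G) {
//     GOTTableManager GOT;
//     PLTTableManager PLT(GOT);
//     visitExistingEdges(G, GOT, PLT);
//     return Error::success();
//   }
//
// Such a function is typically registered as a post-prune pass so that it
// runs before the fix-up phase.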

} // namespace aarch64
} // namespace jitlink
} // namespace llvm

#endif // LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H