xref: /freebsd/contrib/llvm-project/lld/COFF/Chunks.cpp (revision d56accc7c3dcc897489b6a07834763a03b9f3d68)
1 //===- Chunks.cpp ---------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "Chunks.h"
10 #include "COFFLinkerContext.h"
11 #include "InputFiles.h"
12 #include "SymbolTable.h"
13 #include "Symbols.h"
14 #include "Writer.h"
15 #include "llvm/ADT/Twine.h"
16 #include "llvm/BinaryFormat/COFF.h"
17 #include "llvm/Object/COFF.h"
18 #include "llvm/Support/Debug.h"
19 #include "llvm/Support/Endian.h"
20 #include "llvm/Support/raw_ostream.h"
21 #include <algorithm>
22 
23 using namespace llvm;
24 using namespace llvm::object;
25 using namespace llvm::support::endian;
26 using namespace llvm::COFF;
27 using llvm::support::ulittle32_t;
28 
29 namespace lld {
30 namespace coff {
31 
32 SectionChunk::SectionChunk(ObjFile *f, const coff_section *h)
33     : Chunk(SectionKind), file(f), header(h), repl(this) {
34   // Initialize relocs.
35   if (file)
36     setRelocs(file->getCOFFObj()->getRelocations(header));
37 
38   // Initialize sectionName.
39   StringRef sectionName;
40   if (file) {
41     if (Expected<StringRef> e = file->getCOFFObj()->getSectionName(header))
42       sectionName = *e;
43   }
44   sectionNameData = sectionName.data();
45   sectionNameSize = sectionName.size();
46 
47   setAlignment(header->getAlignment());
48 
49   hasData = !(header->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA);
50 
51   // If linker GC is disabled, every chunk starts out alive.  If linker GC is
52   // enabled, treat non-comdat sections as roots. Generally optimized object
53   // files will be built with -ffunction-sections or /Gy, so most things worth
54   // stripping will be in a comdat.
55   if (config)
56     live = !config->doGC || !isCOMDAT();
57   else
58     live = true;
59 }
60 
61 // SectionChunk is one of the most frequently allocated classes, so it is
62 // important to keep it as compact as possible. As of this writing, the number
63 // below is the size of this class on x64 platforms.
64 static_assert(sizeof(SectionChunk) <= 88, "SectionChunk grew unexpectedly");
65 
66 static void add16(uint8_t *p, int16_t v) { write16le(p, read16le(p) + v); }
67 static void add32(uint8_t *p, int32_t v) { write32le(p, read32le(p) + v); }
68 static void add64(uint8_t *p, int64_t v) { write64le(p, read64le(p) + v); }
69 static void or16(uint8_t *p, uint16_t v) { write16le(p, read16le(p) | v); }
70 static void or32(uint8_t *p, uint32_t v) { write32le(p, read32le(p) | v); }
71 
72 // Verify that given sections are appropriate targets for SECREL
73 // relocations. This check is relaxed because unfortunately debug
74 // sections have section-relative relocations against absolute symbols.
75 static bool checkSecRel(const SectionChunk *sec, OutputSection *os) {
76   if (os)
77     return true;
78   if (sec->isCodeView())
79     return false;
80   error("SECREL relocation cannot be applied to absolute symbols");
81   return false;
82 }
83 
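// As an illustration (RVAs made up): a symbol at RVA 0x4010 that lives in an
// output section starting at RVA 0x4000 gets a SECREL value of 0x10, i.e. the
// symbol's offset from the start of its output section.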
84 static void applySecRel(const SectionChunk *sec, uint8_t *off,
85                         OutputSection *os, uint64_t s) {
86   if (!checkSecRel(sec, os))
87     return;
88   uint64_t secRel = s - os->getRVA();
89   if (secRel > UINT32_MAX) {
90     error("overflow in SECREL relocation in section: " + sec->getSectionName());
91     return;
92   }
93   add32(off, secRel);
94 }
95 
96 static void applySecIdx(uint8_t *off, OutputSection *os) {
97   // An absolute symbol doesn't have a section index, but a section index
98   // relocation against an absolute symbol should be resolved to one plus the
99   // last output section index. This is required for compatibility with MSVC.
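  // For example, if the image has five output sections, such a relocation
  // against an absolute symbol is resolved to 6.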
100   if (os)
101     add16(off, os->sectionIndex);
102   else
103     add16(off, DefinedAbsolute::numOutputSections + 1);
104 }
105 
106 void SectionChunk::applyRelX64(uint8_t *off, uint16_t type, OutputSection *os,
107                                uint64_t s, uint64_t p) const {
108   switch (type) {
109   case IMAGE_REL_AMD64_ADDR32:   add32(off, s + config->imageBase); break;
110   case IMAGE_REL_AMD64_ADDR64:   add64(off, s + config->imageBase); break;
111   case IMAGE_REL_AMD64_ADDR32NB: add32(off, s); break;
112   case IMAGE_REL_AMD64_REL32:    add32(off, s - p - 4); break;
113   case IMAGE_REL_AMD64_REL32_1:  add32(off, s - p - 5); break;
114   case IMAGE_REL_AMD64_REL32_2:  add32(off, s - p - 6); break;
115   case IMAGE_REL_AMD64_REL32_3:  add32(off, s - p - 7); break;
116   case IMAGE_REL_AMD64_REL32_4:  add32(off, s - p - 8); break;
117   case IMAGE_REL_AMD64_REL32_5:  add32(off, s - p - 9); break;
118   case IMAGE_REL_AMD64_SECTION:  applySecIdx(off, os); break;
119   case IMAGE_REL_AMD64_SECREL:   applySecRel(this, off, os, s); break;
120   default:
121     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
122           toString(file));
123   }
124 }
125 
126 void SectionChunk::applyRelX86(uint8_t *off, uint16_t type, OutputSection *os,
127                                uint64_t s, uint64_t p) const {
128   switch (type) {
129   case IMAGE_REL_I386_ABSOLUTE: break;
130   case IMAGE_REL_I386_DIR32:    add32(off, s + config->imageBase); break;
131   case IMAGE_REL_I386_DIR32NB:  add32(off, s); break;
132   case IMAGE_REL_I386_REL32:    add32(off, s - p - 4); break;
133   case IMAGE_REL_I386_SECTION:  applySecIdx(off, os); break;
134   case IMAGE_REL_I386_SECREL:   applySecRel(this, off, os, s); break;
135   default:
136     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
137           toString(file));
138   }
139 }
140 
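// applyMOV and readMOV below deal with the Thumb2 MOVW/MOVT encodings, which
// split a 16-bit immediate into imm4:i:imm3:imm8 fields: imm4 sits in bits
// 3:0 and i in bit 10 of the first halfword, imm3 in bits 14:12 and imm8 in
// bits 7:0 of the second halfword.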
141 static void applyMOV(uint8_t *off, uint16_t v) {
142   write16le(off, (read16le(off) & 0xfbf0) | ((v & 0x800) >> 1) | ((v >> 12) & 0xf));
143   write16le(off + 2, (read16le(off + 2) & 0x8f00) | ((v & 0x700) << 4) | (v & 0xff));
144 }
145 
146 static uint16_t readMOV(uint8_t *off, bool movt) {
147   uint16_t op1 = read16le(off);
148   if ((op1 & 0xfbf0) != (movt ? 0xf2c0 : 0xf240))
149     error("unexpected instruction in " + Twine(movt ? "MOVT" : "MOVW") +
150           " instruction in MOV32T relocation");
151   uint16_t op2 = read16le(off + 2);
152   if ((op2 & 0x8000) != 0)
153     error("unexpected instruction in " + Twine(movt ? "MOVT" : "MOVW") +
154           " instruction in MOV32T relocation");
155   return (op2 & 0x00ff) | ((op2 >> 4) & 0x0700) | ((op1 << 1) & 0x0800) |
156          ((op1 & 0x000f) << 12);
157 }
158 
159 void applyMOV32T(uint8_t *off, uint32_t v) {
160   uint16_t immW = readMOV(off, false);    // read MOVW operand
161   uint16_t immT = readMOV(off + 4, true); // read MOVT operand
162   uint32_t imm = immW | (immT << 16);
163   v += imm;                         // add the immediate offset
164   applyMOV(off, v);           // set MOVW operand
165   applyMOV(off + 4, v >> 16); // set MOVT operand
166 }
167 
168 static void applyBranch20T(uint8_t *off, int32_t v) {
169   if (!isInt<21>(v))
170     error("relocation out of range");
171   uint32_t s = v < 0 ? 1 : 0;
172   uint32_t j1 = (v >> 19) & 1;
173   uint32_t j2 = (v >> 18) & 1;
174   or16(off, (s << 10) | ((v >> 12) & 0x3f));
175   or16(off + 2, (j1 << 13) | (j2 << 11) | ((v >> 1) & 0x7ff));
176 }
177 
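// In the Thumb2 T4 encoding used for B.W/BL, the stored J1/J2 bits are
// J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S, which is why the bits are
// inverted and XORed with the sign bit here, unlike the conditional branch
// encoding handled above where J1/J2 are stored directly.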
178 void applyBranch24T(uint8_t *off, int32_t v) {
179   if (!isInt<25>(v))
180     error("relocation out of range");
181   uint32_t s = v < 0 ? 1 : 0;
182   uint32_t j1 = ((~v >> 23) & 1) ^ s;
183   uint32_t j2 = ((~v >> 22) & 1) ^ s;
184   or16(off, (s << 10) | ((v >> 12) & 0x3ff));
185   // Clear out the J1 and J2 bits which may be set.
186   write16le(off + 2, (read16le(off + 2) & 0xd000) | (j1 << 13) | (j2 << 11) | ((v >> 1) & 0x7ff));
187 }
188 
189 void SectionChunk::applyRelARM(uint8_t *off, uint16_t type, OutputSection *os,
190                                uint64_t s, uint64_t p) const {
191   // Pointer to thumb code must have the LSB set.
192   uint64_t sx = s;
193   if (os && (os->header.Characteristics & IMAGE_SCN_MEM_EXECUTE))
194     sx |= 1;
195   switch (type) {
196   case IMAGE_REL_ARM_ADDR32:    add32(off, sx + config->imageBase); break;
197   case IMAGE_REL_ARM_ADDR32NB:  add32(off, sx); break;
198   case IMAGE_REL_ARM_MOV32T:    applyMOV32T(off, sx + config->imageBase); break;
199   case IMAGE_REL_ARM_BRANCH20T: applyBranch20T(off, sx - p - 4); break;
200   case IMAGE_REL_ARM_BRANCH24T: applyBranch24T(off, sx - p - 4); break;
201   case IMAGE_REL_ARM_BLX23T:    applyBranch24T(off, sx - p - 4); break;
202   case IMAGE_REL_ARM_SECTION:   applySecIdx(off, os); break;
203   case IMAGE_REL_ARM_SECREL:    applySecRel(this, off, os, s); break;
204   case IMAGE_REL_ARM_REL32:     add32(off, sx - p - 4); break;
205   default:
206     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
207           toString(file));
208   }
209 }
210 
211 // Interpret the existing immediate value as a byte offset to the
212 // target symbol, then update the instruction with the immediate as
213 // the page offset from the current instruction to the target.
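// In the ADRP/ADR encoding, the low two bits of the immediate (immlo) live in
// bits 30:29 of the instruction and the high 19 bits (immhi) in bits 23:5,
// which is why the value is repacked with the shifts below.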
214 void applyArm64Addr(uint8_t *off, uint64_t s, uint64_t p, int shift) {
215   uint32_t orig = read32le(off);
216   int64_t imm =
217       SignExtend64<21>(((orig >> 29) & 0x3) | ((orig >> 3) & 0x1FFFFC));
218   s += imm;
219   imm = (s >> shift) - (p >> shift);
220   uint32_t immLo = (imm & 0x3) << 29;
221   uint32_t immHi = (imm & 0x1FFFFC) << 3;
222   uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
223   write32le(off, (orig & ~mask) | immLo | immHi);
224 }
225 
226 // Update the immediate field in an AArch64 ldr, str, or add instruction.
227 // Optionally limit the range of the written immediate by one or more bits
228 // (rangeLimit).
229 void applyArm64Imm(uint8_t *off, uint64_t imm, uint32_t rangeLimit) {
230   uint32_t orig = read32le(off);
231   imm += (orig >> 10) & 0xFFF;
232   orig &= ~(0xFFF << 10);
233   write32le(off, orig | ((imm & (0xFFF >> rangeLimit)) << 10));
234 }
235 
236 // Add the 12 bit page offset to the existing immediate.
237 // Ldr/str instructions store the opcode immediate scaled
238 // by the load/store size (giving a larger range for larger
239 // loads/stores). The immediate is always (both before and after
240 // fixing up the relocation) stored scaled similarly.
241 // Even if larger loads/stores have a larger range, limit the
242 // effective offset to 12 bit, since it is intended to be a
243 // page offset.
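// For example, an 8-byte ldr (size field 3) encodes a byte offset of 0x7f8
// as the scaled immediate 0xff.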
244 static void applyArm64Ldr(uint8_t *off, uint64_t imm) {
245   uint32_t orig = read32le(off);
246   uint32_t size = orig >> 30;
247   // 0x04000000 indicates SIMD/FP registers
248   // 0x00800000 indicates 128 bit
249   if ((orig & 0x4800000) == 0x4800000)
250     size += 4;
251   if ((imm & ((1 << size) - 1)) != 0)
252     error("misaligned ldr/str offset");
253   applyArm64Imm(off, imm >> size, size);
254 }
255 
256 static void applySecRelLow12A(const SectionChunk *sec, uint8_t *off,
257                               OutputSection *os, uint64_t s) {
258   if (checkSecRel(sec, os))
259     applyArm64Imm(off, (s - os->getRVA()) & 0xfff, 0);
260 }
261 
262 static void applySecRelHigh12A(const SectionChunk *sec, uint8_t *off,
263                                OutputSection *os, uint64_t s) {
264   if (!checkSecRel(sec, os))
265     return;
266   uint64_t secRel = (s - os->getRVA()) >> 12;
267   if (0xfff < secRel) {
268     error("overflow in SECREL_HIGH12A relocation in section: " +
269           sec->getSectionName());
270     return;
271   }
272   applyArm64Imm(off, secRel & 0xfff, 0);
273 }
274 
275 static void applySecRelLdr(const SectionChunk *sec, uint8_t *off,
276                            OutputSection *os, uint64_t s) {
277   if (checkSecRel(sec, os))
278     applyArm64Ldr(off, (s - os->getRVA()) & 0xfff);
279 }
280 
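// The AArch64 branch relocations below encode the offset divided by 4:
// B/BL store a 26-bit immediate in bits 25:0, B.cond/CBZ a 19-bit immediate
// in bits 23:5, and TBZ/TBNZ a 14-bit immediate in bits 18:5.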
281 void applyArm64Branch26(uint8_t *off, int64_t v) {
282   if (!isInt<28>(v))
283     error("relocation out of range");
284   or32(off, (v & 0x0FFFFFFC) >> 2);
285 }
286 
287 static void applyArm64Branch19(uint8_t *off, int64_t v) {
288   if (!isInt<21>(v))
289     error("relocation out of range");
290   or32(off, (v & 0x001FFFFC) << 3);
291 }
292 
293 static void applyArm64Branch14(uint8_t *off, int64_t v) {
294   if (!isInt<16>(v))
295     error("relocation out of range");
296   or32(off, (v & 0x0000FFFC) << 3);
297 }
298 
299 void SectionChunk::applyRelARM64(uint8_t *off, uint16_t type, OutputSection *os,
300                                  uint64_t s, uint64_t p) const {
301   switch (type) {
302   case IMAGE_REL_ARM64_PAGEBASE_REL21: applyArm64Addr(off, s, p, 12); break;
303   case IMAGE_REL_ARM64_REL21:          applyArm64Addr(off, s, p, 0); break;
304   case IMAGE_REL_ARM64_PAGEOFFSET_12A: applyArm64Imm(off, s & 0xfff, 0); break;
305   case IMAGE_REL_ARM64_PAGEOFFSET_12L: applyArm64Ldr(off, s & 0xfff); break;
306   case IMAGE_REL_ARM64_BRANCH26:       applyArm64Branch26(off, s - p); break;
307   case IMAGE_REL_ARM64_BRANCH19:       applyArm64Branch19(off, s - p); break;
308   case IMAGE_REL_ARM64_BRANCH14:       applyArm64Branch14(off, s - p); break;
309   case IMAGE_REL_ARM64_ADDR32:         add32(off, s + config->imageBase); break;
310   case IMAGE_REL_ARM64_ADDR32NB:       add32(off, s); break;
311   case IMAGE_REL_ARM64_ADDR64:         add64(off, s + config->imageBase); break;
312   case IMAGE_REL_ARM64_SECREL:         applySecRel(this, off, os, s); break;
313   case IMAGE_REL_ARM64_SECREL_LOW12A:  applySecRelLow12A(this, off, os, s); break;
314   case IMAGE_REL_ARM64_SECREL_HIGH12A: applySecRelHigh12A(this, off, os, s); break;
315   case IMAGE_REL_ARM64_SECREL_LOW12L:  applySecRelLdr(this, off, os, s); break;
316   case IMAGE_REL_ARM64_SECTION:        applySecIdx(off, os); break;
317   case IMAGE_REL_ARM64_REL32:          add32(off, s - p - 4); break;
318   default:
319     error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
320           toString(file));
321   }
322 }
323 
324 static void maybeReportRelocationToDiscarded(const SectionChunk *fromChunk,
325                                              Defined *sym,
326                                              const coff_relocation &rel) {
327   // Don't report these errors when the relocation comes from a debug info
328   // section or in mingw mode. MinGW mode object files (built by GCC) can
329   // have leftover sections with relocations against discarded comdat
330   // sections. Such sections are left as is, with relocations untouched.
331   if (fromChunk->isCodeView() || fromChunk->isDWARF() || config->mingw)
332     return;
333 
334   // Get the name of the symbol. If it's null, it was discarded early, so we
335   // have to go back to the object file.
336   ObjFile *file = fromChunk->file;
337   StringRef name;
338   if (sym) {
339     name = sym->getName();
340   } else {
341     COFFSymbolRef coffSym =
342         check(file->getCOFFObj()->getSymbol(rel.SymbolTableIndex));
343     name = check(file->getCOFFObj()->getSymbolName(coffSym));
344   }
345 
346   std::vector<std::string> symbolLocations =
347       getSymbolLocations(file, rel.SymbolTableIndex);
348 
349   std::string out;
350   llvm::raw_string_ostream os(out);
351   os << "relocation against symbol in discarded section: " + name;
352   for (const std::string &s : symbolLocations)
353     os << s;
354   error(os.str());
355 }
356 
357 void SectionChunk::writeTo(uint8_t *buf) const {
358   if (!hasData)
359     return;
360   // Copy section contents from source object file to output file.
361   ArrayRef<uint8_t> a = getContents();
362   if (!a.empty())
363     memcpy(buf, a.data(), a.size());
364 
365   // Apply relocations.
366   size_t inputSize = getSize();
367   for (const coff_relocation &rel : getRelocs()) {
368     // Check for an invalid relocation offset. This check isn't perfect, because
369     // we don't have the relocation size, which is only known after checking the
370     // machine and relocation type. As a result, a relocation may overwrite the
371     // beginning of the following input section.
372     if (rel.VirtualAddress >= inputSize) {
373       error("relocation points beyond the end of its parent section");
374       continue;
375     }
376 
377     applyRelocation(buf + rel.VirtualAddress, rel);
378   }
379 }
380 
381 void SectionChunk::applyRelocation(uint8_t *off,
382                                    const coff_relocation &rel) const {
383   auto *sym = dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));
384 
385   // Get the output section of the symbol for this relocation.  The output
386   // section is needed to compute SECREL and SECTION relocations used in debug
387   // info.
388   Chunk *c = sym ? sym->getChunk() : nullptr;
389   OutputSection *os = c ? file->ctx.getOutputSection(c) : nullptr;
390 
391   // Skip the relocation if it refers to a discarded section, and diagnose it
392   // as an error if appropriate. If a symbol was discarded early, it may be
393   // null. If it was discarded late, the output section will be null, unless
394   // it was an absolute or synthetic symbol.
395   if (!sym ||
396       (!os && !isa<DefinedAbsolute>(sym) && !isa<DefinedSynthetic>(sym))) {
397     maybeReportRelocationToDiscarded(this, sym, rel);
398     return;
399   }
400 
401   uint64_t s = sym->getRVA();
402 
403   // Compute the RVA of the relocation for relative relocations.
404   uint64_t p = rva + rel.VirtualAddress;
405   switch (config->machine) {
406   case AMD64:
407     applyRelX64(off, rel.Type, os, s, p);
408     break;
409   case I386:
410     applyRelX86(off, rel.Type, os, s, p);
411     break;
412   case ARMNT:
413     applyRelARM(off, rel.Type, os, s, p);
414     break;
415   case ARM64:
416     applyRelARM64(off, rel.Type, os, s, p);
417     break;
418   default:
419     llvm_unreachable("unknown machine type");
420   }
421 }
422 
423 // Defend against unsorted relocations. This may be overly conservative.
424 void SectionChunk::sortRelocations() {
425   auto cmpByVa = [](const coff_relocation &l, const coff_relocation &r) {
426     return l.VirtualAddress < r.VirtualAddress;
427   };
428   if (llvm::is_sorted(getRelocs(), cmpByVa))
429     return;
430   warn("some relocations in " + file->getName() + " are not sorted");
431   MutableArrayRef<coff_relocation> newRelocs(
432       bAlloc().Allocate<coff_relocation>(relocsSize), relocsSize);
433   memcpy(newRelocs.data(), relocsData, relocsSize * sizeof(coff_relocation));
434   llvm::sort(newRelocs, cmpByVa);
435   setRelocs(newRelocs);
436 }
437 
438 // Similar to writeTo, but suitable for relocating a subsection of the overall
439 // section.
440 void SectionChunk::writeAndRelocateSubsection(ArrayRef<uint8_t> sec,
441                                               ArrayRef<uint8_t> subsec,
442                                               uint32_t &nextRelocIndex,
443                                               uint8_t *buf) const {
444   assert(!subsec.empty() && !sec.empty());
445   assert(sec.begin() <= subsec.begin() && subsec.end() <= sec.end() &&
446          "subsection is not part of this section");
447   size_t vaBegin = std::distance(sec.begin(), subsec.begin());
448   size_t vaEnd = std::distance(sec.begin(), subsec.end());
449   memcpy(buf, subsec.data(), subsec.size());
450   for (; nextRelocIndex < relocsSize; ++nextRelocIndex) {
451     const coff_relocation &rel = relocsData[nextRelocIndex];
452     // Only apply relocations that apply to this subsection. These checks
453     // assume that all subsections completely contain their relocations.
454     // Relocations must not straddle the beginning or end of a subsection.
455     if (rel.VirtualAddress < vaBegin)
456       continue;
457     if (rel.VirtualAddress + 1 >= vaEnd)
458       break;
459     applyRelocation(&buf[rel.VirtualAddress - vaBegin], rel);
460   }
461 }
462 
463 void SectionChunk::addAssociative(SectionChunk *child) {
464   // Insert the child section into the list of associated children. Keep the
465   // list ordered by section name so that ICF does not depend on section order.
466   assert(child->assocChildren == nullptr &&
467          "associated sections cannot have their own associated children");
468   SectionChunk *prev = this;
469   SectionChunk *next = assocChildren;
470   for (; next != nullptr; prev = next, next = next->assocChildren) {
471     if (next->getSectionName() <= child->getSectionName())
472       break;
473   }
474 
475   // Insert child between prev and next.
476   assert(prev->assocChildren == next);
477   prev->assocChildren = child;
478   child->assocChildren = next;
479 }
480 
481 static uint8_t getBaserelType(const coff_relocation &rel) {
482   switch (config->machine) {
483   case AMD64:
484     if (rel.Type == IMAGE_REL_AMD64_ADDR64)
485       return IMAGE_REL_BASED_DIR64;
486     if (rel.Type == IMAGE_REL_AMD64_ADDR32)
487       return IMAGE_REL_BASED_HIGHLOW;
488     return IMAGE_REL_BASED_ABSOLUTE;
489   case I386:
490     if (rel.Type == IMAGE_REL_I386_DIR32)
491       return IMAGE_REL_BASED_HIGHLOW;
492     return IMAGE_REL_BASED_ABSOLUTE;
493   case ARMNT:
494     if (rel.Type == IMAGE_REL_ARM_ADDR32)
495       return IMAGE_REL_BASED_HIGHLOW;
496     if (rel.Type == IMAGE_REL_ARM_MOV32T)
497       return IMAGE_REL_BASED_ARM_MOV32T;
498     return IMAGE_REL_BASED_ABSOLUTE;
499   case ARM64:
500     if (rel.Type == IMAGE_REL_ARM64_ADDR64)
501       return IMAGE_REL_BASED_DIR64;
502     return IMAGE_REL_BASED_ABSOLUTE;
503   default:
504     llvm_unreachable("unknown machine type");
505   }
506 }
507 
508 // Windows-specific.
509 // Collect all locations that contain absolute addresses, which need to be
510 // fixed by the loader if load-time relocation is needed.
511 // Only called when base relocation is enabled.
512 void SectionChunk::getBaserels(std::vector<Baserel> *res) {
513   for (const coff_relocation &rel : getRelocs()) {
514     uint8_t ty = getBaserelType(rel);
515     if (ty == IMAGE_REL_BASED_ABSOLUTE)
516       continue;
517     Symbol *target = file->getSymbol(rel.SymbolTableIndex);
518     if (!target || isa<DefinedAbsolute>(target))
519       continue;
520     res->emplace_back(rva + rel.VirtualAddress, ty);
521   }
522 }
523 
524 // MinGW specific.
525 // Check whether a static relocation of type Type can be deferred and
526 // handled at runtime as a pseudo relocation (for references to a module
527 // local variable, which turned out to actually need to be imported from
528 // another DLL). This returns the size the relocation is supposed to update,
529 // in bits, or 0 if the relocation cannot be handled as a runtime pseudo
530 // relocation.
531 static int getRuntimePseudoRelocSize(uint16_t type) {
532   // Only relocations that contain either an absolute address or a plain
533   // relative offset can be handled, since the runtime pseudo reloc
534   // implementation adds 8/16/32/64 bit values to a memory address.
535   //
536   // Given a pseudo relocation entry,
537   //
538   // typedef struct {
539   //   DWORD sym;
540   //   DWORD target;
541   //   DWORD flags;
542   // } runtime_pseudo_reloc_item_v2;
543   //
544   // the runtime relocation performs this adjustment:
545   //     *(base + .target) += *(base + .sym) - (base + .sym)
546   //
547   // This works both for absolute addresses (IMAGE_REL_*_ADDR32/64,
548   // IMAGE_REL_I386_DIR32), where the memory location initially contains
549   // the address of the IAT slot, and for relative addresses (IMAGE_REL_*_REL32),
550   // where the memory location originally contains the relative offset to the
551   // IAT slot.
552   //
553   // This requires the target address to be writable, either directly out of
554   // the image, or temporarily changed at runtime with VirtualProtect.
555   // Since this only operates on direct address values, it doesn't work for
556   // ARM/ARM64 relocations, other than the plain ADDR32/ADDR64 relocations.
557   switch (config->machine) {
558   case AMD64:
559     switch (type) {
560     case IMAGE_REL_AMD64_ADDR64:
561       return 64;
562     case IMAGE_REL_AMD64_ADDR32:
563     case IMAGE_REL_AMD64_REL32:
564     case IMAGE_REL_AMD64_REL32_1:
565     case IMAGE_REL_AMD64_REL32_2:
566     case IMAGE_REL_AMD64_REL32_3:
567     case IMAGE_REL_AMD64_REL32_4:
568     case IMAGE_REL_AMD64_REL32_5:
569       return 32;
570     default:
571       return 0;
572     }
573   case I386:
574     switch (type) {
575     case IMAGE_REL_I386_DIR32:
576     case IMAGE_REL_I386_REL32:
577       return 32;
578     default:
579       return 0;
580     }
581   case ARMNT:
582     switch (type) {
583     case IMAGE_REL_ARM_ADDR32:
584       return 32;
585     default:
586       return 0;
587     }
588   case ARM64:
589     switch (type) {
590     case IMAGE_REL_ARM64_ADDR64:
591       return 64;
592     case IMAGE_REL_ARM64_ADDR32:
593       return 32;
594     default:
595       return 0;
596     }
597   default:
598     llvm_unreachable("unknown machine type");
599   }
600 }
601 
602 // MinGW specific.
603 // Append information to the provided vector about all relocations that
604 // need to be handled at runtime as runtime pseudo relocations (references
605 // to a module local variable, which turned out to actually need to be
606 // imported from another DLL).
607 void SectionChunk::getRuntimePseudoRelocs(
608     std::vector<RuntimePseudoReloc> &res) {
609   for (const coff_relocation &rel : getRelocs()) {
610     auto *target =
611         dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));
612     if (!target || !target->isRuntimePseudoReloc)
613       continue;
614     int sizeInBits = getRuntimePseudoRelocSize(rel.Type);
615     if (sizeInBits == 0) {
616       error("unable to automatically import from " + target->getName() +
617             " with relocation type " +
618             file->getCOFFObj()->getRelocationTypeName(rel.Type) + " in " +
619             toString(file));
620       continue;
621     }
622     // sizeInBits is used to initialize the Flags field; currently no
623     // other flags are defined.
624     res.emplace_back(
625         RuntimePseudoReloc(target, this, rel.VirtualAddress, sizeInBits));
626   }
627 }
628 
629 bool SectionChunk::isCOMDAT() const {
630   return header->Characteristics & IMAGE_SCN_LNK_COMDAT;
631 }
632 
633 void SectionChunk::printDiscardedMessage() const {
634   // Removed by dead-stripping. If it's removed by ICF, ICF already
635   // printed out the name, so don't repeat that here.
636   if (sym && this == repl)
637     log("Discarded " + sym->getName());
638 }
639 
640 StringRef SectionChunk::getDebugName() const {
641   if (sym)
642     return sym->getName();
643   return "";
644 }
645 
646 ArrayRef<uint8_t> SectionChunk::getContents() const {
647   ArrayRef<uint8_t> a;
648   cantFail(file->getCOFFObj()->getSectionContents(header, a));
649   return a;
650 }
651 
652 ArrayRef<uint8_t> SectionChunk::consumeDebugMagic() {
653   assert(isCodeView());
654   return consumeDebugMagic(getContents(), getSectionName());
655 }
656 
657 ArrayRef<uint8_t> SectionChunk::consumeDebugMagic(ArrayRef<uint8_t> data,
658                                                   StringRef sectionName) {
659   if (data.empty())
660     return {};
661 
662   // First 4 bytes are section magic.
663   if (data.size() < 4)
664     fatal("the section is too short: " + sectionName);
665 
666   if (!sectionName.startswith(".debug$"))
667     fatal("invalid section: " + sectionName);
668 
669   uint32_t magic = support::endian::read32le(data.data());
670   uint32_t expectedMagic = sectionName == ".debug$H"
671                                ? DEBUG_HASHES_SECTION_MAGIC
672                                : DEBUG_SECTION_MAGIC;
673   if (magic != expectedMagic) {
674     warn("ignoring section " + sectionName + " with unrecognized magic 0x" +
675          utohexstr(magic));
676     return {};
677   }
678   return data.slice(4);
679 }
680 
681 SectionChunk *SectionChunk::findByName(ArrayRef<SectionChunk *> sections,
682                                        StringRef name) {
683   for (SectionChunk *c : sections)
684     if (c->getSectionName() == name)
685       return c;
686   return nullptr;
687 }
688 
689 void SectionChunk::replace(SectionChunk *other) {
690   p2Align = std::max(p2Align, other->p2Align);
691   other->repl = repl;
692   other->live = false;
693 }
694 
695 uint32_t SectionChunk::getSectionNumber() const {
696   DataRefImpl r;
697   r.p = reinterpret_cast<uintptr_t>(header);
698   SectionRef s(r, file->getCOFFObj());
699   return s.getIndex() + 1;
700 }
701 
702 CommonChunk::CommonChunk(const COFFSymbolRef s) : sym(s) {
703   // The value of a common symbol is its size. Align all common symbols smaller
704   // than 32 bytes naturally, i.e. round the size up to the next power of two.
705   // This is what MSVC link.exe does.
706   setAlignment(std::min(32U, uint32_t(PowerOf2Ceil(sym.getValue()))));
707   hasData = false;
708 }
709 
710 uint32_t CommonChunk::getOutputCharacteristics() const {
711   return IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ |
712          IMAGE_SCN_MEM_WRITE;
713 }
714 
715 void StringChunk::writeTo(uint8_t *buf) const {
716   memcpy(buf, str.data(), str.size());
717   buf[str.size()] = '\0';
718 }
719 
720 ImportThunkChunkX64::ImportThunkChunkX64(Defined *s) : ImportThunkChunk(s) {
721   // The Intel Optimization Manual says that all branch targets
722   // should be 16-byte aligned. The MSVC linker does this too.
723   setAlignment(16);
724 }
725 
726 void ImportThunkChunkX64::writeTo(uint8_t *buf) const {
727   memcpy(buf, importThunkX86, sizeof(importThunkX86));
728   // The first two bytes are a JMP instruction. Fill its operand.
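  // The displacement of the RIP-relative jmp is measured from the end of the
  // thunk (the address of the next instruction), hence the getSize() term.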
729   write32le(buf + 2, impSymbol->getRVA() - rva - getSize());
730 }
731 
732 void ImportThunkChunkX86::getBaserels(std::vector<Baserel> *res) {
733   res->emplace_back(getRVA() + 2);
734 }
735 
736 void ImportThunkChunkX86::writeTo(uint8_t *buf) const {
737   memcpy(buf, importThunkX86, sizeof(importThunkX86));
738   // The first two bytes are a JMP instruction. Fill its operand.
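  // Unlike the x64 thunk, the operand here is an absolute virtual address,
  // which is why getBaserels() above registers a base relocation for it.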
739   write32le(buf + 2,
740             impSymbol->getRVA() + config->imageBase);
741 }
742 
743 void ImportThunkChunkARM::getBaserels(std::vector<Baserel> *res) {
744   res->emplace_back(getRVA(), IMAGE_REL_BASED_ARM_MOV32T);
745 }
746 
747 void ImportThunkChunkARM::writeTo(uint8_t *buf) const {
748   memcpy(buf, importThunkARM, sizeof(importThunkARM));
749   // Fix the movw and movt operands.
750   applyMOV32T(buf, impSymbol->getRVA() + config->imageBase);
751 }
752 
753 void ImportThunkChunkARM64::writeTo(uint8_t *buf) const {
754   int64_t off = impSymbol->getRVA() & 0xfff;
755   memcpy(buf, importThunkARM64, sizeof(importThunkARM64));
756   applyArm64Addr(buf, impSymbol->getRVA(), rva, 12);
757   applyArm64Ldr(buf + 4, off);
758 }
759 
760 // A Thumb2, PIC, non-interworking range extension thunk.
761 const uint8_t armThunk[] = {
762     0x40, 0xf2, 0x00, 0x0c, // P:  movw ip,:lower16:S - (P + (L1-P) + 4)
763     0xc0, 0xf2, 0x00, 0x0c, //     movt ip,:upper16:S - (P + (L1-P) + 4)
764     0xe7, 0x44,             // L1: add  pc, ip
765 };
766 
767 size_t RangeExtensionThunkARM::getSize() const {
768   assert(config->machine == ARMNT);
769   return sizeof(armThunk);
770 }
771 
772 void RangeExtensionThunkARM::writeTo(uint8_t *buf) const {
773   assert(config->machine == ARMNT);
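  // The 12 below is the offset of the final "add pc, ip" within the thunk (8)
  // plus the Thumb PC bias (4).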
774   uint64_t offset = target->getRVA() - rva - 12;
775   memcpy(buf, armThunk, sizeof(armThunk));
776   applyMOV32T(buf, uint32_t(offset));
777 }
778 
779 // A position independent ARM64 adrp+add thunk, with a maximum range of
780 // +/- 4 GB, which is enough for any PE-COFF.
781 const uint8_t arm64Thunk[] = {
782     0x10, 0x00, 0x00, 0x90, // adrp x16, Dest
783     0x10, 0x02, 0x00, 0x91, // add  x16, x16, :lo12:Dest
784     0x00, 0x02, 0x1f, 0xd6, // br   x16
785 };
786 
787 size_t RangeExtensionThunkARM64::getSize() const {
788   assert(config->machine == ARM64);
789   return sizeof(arm64Thunk);
790 }
791 
792 void RangeExtensionThunkARM64::writeTo(uint8_t *buf) const {
793   assert(config->machine == ARM64);
794   memcpy(buf, arm64Thunk, sizeof(arm64Thunk));
795   applyArm64Addr(buf + 0, target->getRVA(), rva, 12);
796   applyArm64Imm(buf + 4, target->getRVA() & 0xfff, 0);
797 }
798 
799 void LocalImportChunk::getBaserels(std::vector<Baserel> *res) {
800   res->emplace_back(getRVA());
801 }
802 
803 size_t LocalImportChunk::getSize() const { return config->wordsize; }
804 
805 void LocalImportChunk::writeTo(uint8_t *buf) const {
806   if (config->is64()) {
807     write64le(buf, sym->getRVA() + config->imageBase);
808   } else {
809     write32le(buf, sym->getRVA() + config->imageBase);
810   }
811 }
812 
813 void RVATableChunk::writeTo(uint8_t *buf) const {
814   ulittle32_t *begin = reinterpret_cast<ulittle32_t *>(buf);
815   size_t cnt = 0;
816   for (const ChunkAndOffset &co : syms)
817     begin[cnt++] = co.inputChunk->getRVA() + co.offset;
818   std::sort(begin, begin + cnt);
819   assert(std::unique(begin, begin + cnt) == begin + cnt &&
820          "RVA tables should be de-duplicated");
821 }
822 
823 void RVAFlagTableChunk::writeTo(uint8_t *buf) const {
824   struct RVAFlag {
825     ulittle32_t rva;
826     uint8_t flag;
827   };
828   auto flags =
829       makeMutableArrayRef(reinterpret_cast<RVAFlag *>(buf), syms.size());
830   for (auto t : zip(syms, flags)) {
831     const auto &sym = std::get<0>(t);
832     auto &flag = std::get<1>(t);
833     flag.rva = sym.inputChunk->getRVA() + sym.offset;
834     flag.flag = 0;
835   }
836   llvm::sort(flags,
837              [](const RVAFlag &a, const RVAFlag &b) { return a.rva < b.rva; });
838   assert(llvm::unique(flags, [](const RVAFlag &a,
839                                 const RVAFlag &b) { return a.rva == b.rva; }) ==
840              flags.end() &&
841          "RVA tables should be de-duplicated");
842 }
843 
844 // MinGW specific, for the "automatic import of variables from DLLs" feature.
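// The table is a 12-byte version header (three 32-bit words, the last one set
// to 1 to select the v2 format) followed by one 12-byte entry per pseudo
// relocation, which is where the 12 + 12 * N size below comes from.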
845 size_t PseudoRelocTableChunk::getSize() const {
846   if (relocs.empty())
847     return 0;
848   return 12 + 12 * relocs.size();
849 }
850 
851 // MinGW specific.
852 void PseudoRelocTableChunk::writeTo(uint8_t *buf) const {
853   if (relocs.empty())
854     return;
855 
856   ulittle32_t *table = reinterpret_cast<ulittle32_t *>(buf);
857   // This is the list header, to signal the runtime pseudo relocation v2
858   // format.
859   table[0] = 0;
860   table[1] = 0;
861   table[2] = 1;
862 
863   size_t idx = 3;
864   for (const RuntimePseudoReloc &rpr : relocs) {
865     table[idx + 0] = rpr.sym->getRVA();
866     table[idx + 1] = rpr.target->getRVA() + rpr.targetOffset;
867     table[idx + 2] = rpr.flags;
868     idx += 3;
869   }
870 }
871 
872 // Windows-specific. This class represents a block in the .reloc section.
873 // The format is described below.
874 //
875 // On Windows, each DLL is linked against a fixed base address and
876 // usually loaded to that address. However, if there's already another
877 // DLL that overlaps, the loader has to relocate it. To do that, DLLs
878 // contain .reloc sections which contain offsets that need to be fixed
879 // up at runtime. If the loader finds that a DLL cannot be loaded to its
880 // desired base address, it loads it somewhere else, and adds <actual
881 // base address> - <desired base address> to each offset that is
882 // specified by the .reloc section. In ELF terms, .reloc sections
883 // contain relative relocations in REL format (as opposed to RELA.)
884 //
885 // This already significantly reduces the size of relocations compared
886 // to ELF .rel.dyn, but Windows does more to reduce it (probably because
887 // it was invented for PCs in the late '80s or early '90s). Offsets in
888 // .reloc are grouped by 4 KiB page (a 12-bit page offset), and
889 // offsets sharing the same page address are stored consecutively to
890 // represent them with less space. This is very similar to the page
891 // table which is grouped by (multiple stages of) pages.
892 //
893 // For example, let's say we have 0x00030, 0x00500, 0x00700, 0x00A00,
894 // 0x20004, and 0x20008 in a .reloc section for x64. The uppermost 4
895 // bits of each entry hold the type, IMAGE_REL_BASED_DIR64 (0xA). In the
896 // section, they are represented like this:
897 //
898 //   0x00000  -- page address (4 bytes)
899 //   16       -- size of this block (4 bytes)
900 //     0xA030 -- entries (2 bytes each)
901 //     0xA500
902 //     0xA700
903 //     0xAA00
904 //   0x20000  -- page address (4 bytes)
905 //   12       -- size of this block (4 bytes)
906 //     0xA004 -- entries (2 bytes each)
907 //     0xA008
908 //
909 // Usually we have a lot of relocations for each page, so the number of
910 // bytes for one .reloc entry is close to 2 bytes on average.
911 BaserelChunk::BaserelChunk(uint32_t page, Baserel *begin, Baserel *end) {
912   // The block header consists of a 4-byte page RVA and a 4-byte block size.
913   // Each entry is 2 bytes. The last entry may be padding.
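  // Each entry packs the 4-bit relocation type into its upper nibble and the
  // 12-bit offset from the page RVA into the low bits, as in the example in
  // the block comment above.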
914   data.resize(alignTo((end - begin) * 2 + 8, 4));
915   uint8_t *p = data.data();
916   write32le(p, page);
917   write32le(p + 4, data.size());
918   p += 8;
919   for (Baserel *i = begin; i != end; ++i) {
920     write16le(p, (i->type << 12) | (i->rva - page));
921     p += 2;
922   }
923 }
924 
925 void BaserelChunk::writeTo(uint8_t *buf) const {
926   memcpy(buf, data.data(), data.size());
927 }
928 
929 uint8_t Baserel::getDefaultType() {
930   switch (config->machine) {
931   case AMD64:
932   case ARM64:
933     return IMAGE_REL_BASED_DIR64;
934   case I386:
935   case ARMNT:
936     return IMAGE_REL_BASED_HIGHLOW;
937   default:
938     llvm_unreachable("unknown machine type");
939   }
940 }
941 
942 MergeChunk::MergeChunk(uint32_t alignment)
943     : builder(StringTableBuilder::RAW, alignment) {
944   setAlignment(alignment);
945 }
946 
947 void MergeChunk::addSection(COFFLinkerContext &ctx, SectionChunk *c) {
948   assert(isPowerOf2_32(c->getAlignment()));
949   uint8_t p2Align = llvm::Log2_32(c->getAlignment());
950   assert(p2Align < array_lengthof(ctx.mergeChunkInstances));
951   auto *&mc = ctx.mergeChunkInstances[p2Align];
952   if (!mc)
953     mc = make<MergeChunk>(c->getAlignment());
954   mc->sections.push_back(c);
955 }
956 
957 void MergeChunk::finalizeContents() {
958   assert(!finalized && "should only finalize once");
959   for (SectionChunk *c : sections)
960     if (c->live)
961       builder.add(toStringRef(c->getContents()));
962   builder.finalize();
963   finalized = true;
964 }
965 
966 void MergeChunk::assignSubsectionRVAs() {
967   for (SectionChunk *c : sections) {
968     if (!c->live)
969       continue;
970     size_t off = builder.getOffset(toStringRef(c->getContents()));
971     c->setRVA(rva + off);
972   }
973 }
974 
975 uint32_t MergeChunk::getOutputCharacteristics() const {
976   return IMAGE_SCN_MEM_READ | IMAGE_SCN_CNT_INITIALIZED_DATA;
977 }
978 
979 size_t MergeChunk::getSize() const {
980   return builder.getSize();
981 }
982 
983 void MergeChunk::writeTo(uint8_t *buf) const {
984   builder.write(buf);
985 }
986 
987 // MinGW specific.
988 size_t AbsolutePointerChunk::getSize() const { return config->wordsize; }
989 
990 void AbsolutePointerChunk::writeTo(uint8_t *buf) const {
991   if (config->is64()) {
992     write64le(buf, value);
993   } else {
994     write32le(buf, value);
995   }
996 }
997 
998 } // namespace coff
999 } // namespace lld
1000