//===- Chunks.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Chunks.h"
#include "InputFiles.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "Writer.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::COFF;
using llvm::support::ulittle32_t;

namespace lld {
namespace coff {

SectionChunk::SectionChunk(ObjFile *f, const coff_section *h)
    : Chunk(SectionKind), file(f), header(h), repl(this) {
  // Initialize relocs.
  if (file)
    setRelocs(file->getCOFFObj()->getRelocations(header));

  // Initialize sectionName.
  StringRef sectionName;
  if (file) {
    if (Expected<StringRef> e = file->getCOFFObj()->getSectionName(header))
      sectionName = *e;
  }
  sectionNameData = sectionName.data();
  sectionNameSize = sectionName.size();

  setAlignment(header->getAlignment());

  hasData = !(header->Characteristics & IMAGE_SCN_CNT_UNINITIALIZED_DATA);

  // If linker GC is disabled, every chunk starts out alive.  If linker GC is
  // enabled, treat non-comdat sections as roots. Generally optimized object
  // files will be built with -ffunction-sections or /Gy, so most things worth
  // stripping will be in a comdat.
  if (config)
    live = !config->doGC || !isCOMDAT();
  else
    live = true;
}

// SectionChunk is one of the most frequently allocated classes, so it is
// important to keep it as compact as possible. As of this writing, the number
// below is the size of this class on x64 platforms.
static_assert(sizeof(SectionChunk) <= 88, "SectionChunk grew unexpectedly");

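// Little-endian read-modify-write helpers: each reads the existing value at p,
// adds v to it (or ORs it in) and writes the result back. For the add*
// variants, the bytes already in the section act as the relocation's implicit
// addend.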
static void add16(uint8_t *p, int16_t v) { write16le(p, read16le(p) + v); }
static void add32(uint8_t *p, int32_t v) { write32le(p, read32le(p) + v); }
static void add64(uint8_t *p, int64_t v) { write64le(p, read64le(p) + v); }
static void or16(uint8_t *p, uint16_t v) { write16le(p, read16le(p) | v); }
static void or32(uint8_t *p, uint32_t v) { write32le(p, read32le(p) | v); }

// Verify that given sections are appropriate targets for SECREL
// relocations. This check is relaxed because unfortunately debug
// sections have section-relative relocations against absolute symbols.
static bool checkSecRel(const SectionChunk *sec, OutputSection *os) {
  if (os)
    return true;
  if (sec->isCodeView())
    return false;
  error("SECREL relocation cannot be applied to absolute symbols");
  return false;
}

static void applySecRel(const SectionChunk *sec, uint8_t *off,
                        OutputSection *os, uint64_t s) {
  if (!checkSecRel(sec, os))
    return;
  uint64_t secRel = s - os->getRVA();
  if (secRel > UINT32_MAX) {
    error("overflow in SECREL relocation in section: " + sec->getSectionName());
    return;
  }
  add32(off, secRel);
}

static void applySecIdx(uint8_t *off, OutputSection *os) {
  // An absolute symbol doesn't have a section index, but a section index
  // relocation against an absolute symbol should be resolved to one plus the
  // last output section index. This is required for compatibility with MSVC.
  if (os)
    add16(off, os->sectionIndex);
  else
    add16(off, DefinedAbsolute::numOutputSections + 1);
}

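// Apply an x86-64 relocation. In the applyRel* functions below, s is the RVA
// of the target symbol and p is the RVA of the location being fixed up; any
// addend is already present in the section data and is picked up by the
// read-modify-write helpers above.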
void SectionChunk::applyRelX64(uint8_t *off, uint16_t type, OutputSection *os,
                               uint64_t s, uint64_t p) const {
  switch (type) {
  case IMAGE_REL_AMD64_ADDR32:   add32(off, s + config->imageBase); break;
  case IMAGE_REL_AMD64_ADDR64:   add64(off, s + config->imageBase); break;
  case IMAGE_REL_AMD64_ADDR32NB: add32(off, s); break;
  case IMAGE_REL_AMD64_REL32:    add32(off, s - p - 4); break;
  case IMAGE_REL_AMD64_REL32_1:  add32(off, s - p - 5); break;
  case IMAGE_REL_AMD64_REL32_2:  add32(off, s - p - 6); break;
  case IMAGE_REL_AMD64_REL32_3:  add32(off, s - p - 7); break;
  case IMAGE_REL_AMD64_REL32_4:  add32(off, s - p - 8); break;
  case IMAGE_REL_AMD64_REL32_5:  add32(off, s - p - 9); break;
  case IMAGE_REL_AMD64_SECTION:  applySecIdx(off, os); break;
  case IMAGE_REL_AMD64_SECREL:   applySecRel(this, off, os, s); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
          toString(file));
  }
}

void SectionChunk::applyRelX86(uint8_t *off, uint16_t type, OutputSection *os,
                               uint64_t s, uint64_t p) const {
  switch (type) {
  case IMAGE_REL_I386_ABSOLUTE: break;
  case IMAGE_REL_I386_DIR32:    add32(off, s + config->imageBase); break;
  case IMAGE_REL_I386_DIR32NB:  add32(off, s); break;
  case IMAGE_REL_I386_REL32:    add32(off, s - p - 4); break;
  case IMAGE_REL_I386_SECTION:  applySecIdx(off, os); break;
  case IMAGE_REL_I386_SECREL:   applySecRel(this, off, os, s); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
          toString(file));
  }
}

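// Write a 16-bit immediate into one half of a Thumb-2 MOVW/MOVT pair, keeping
// the other opcode bits intact. The immediate is split across the imm4, i,
// imm3 and imm8 fields of the two halfwords.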
static void applyMOV(uint8_t *off, uint16_t v) {
  write16le(off, (read16le(off) & 0xfbf0) | ((v & 0x800) >> 1) | ((v >> 12) & 0xf));
  write16le(off + 2, (read16le(off + 2) & 0x8f00) | ((v & 0x700) << 4) | (v & 0xff));
}

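// Read back the 16-bit immediate currently encoded in a Thumb-2 MOVW or MOVT
// instruction, diagnosing unexpected opcodes. This recovers the existing
// addend of a MOV32T relocation.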
static uint16_t readMOV(uint8_t *off, bool movt) {
  uint16_t op1 = read16le(off);
  if ((op1 & 0xfbf0) != (movt ? 0xf2c0 : 0xf240))
    error("unexpected instruction in " + Twine(movt ? "MOVT" : "MOVW") +
          " instruction in MOV32T relocation");
  uint16_t op2 = read16le(off + 2);
  if ((op2 & 0x8000) != 0)
    error("unexpected instruction in " + Twine(movt ? "MOVT" : "MOVW") +
          " instruction in MOV32T relocation");
  return (op2 & 0x00ff) | ((op2 >> 4) & 0x0700) | ((op1 << 1) & 0x0800) |
         ((op1 & 0x000f) << 12);
}

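// Apply a MOV32T relocation: the MOVW/MOVT pair already encodes a 32-bit
// addend, which is added to v before the low and high halves are rewritten.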
void applyMOV32T(uint8_t *off, uint32_t v) {
  uint16_t immW = readMOV(off, false);    // read MOVW operand
  uint16_t immT = readMOV(off + 4, true); // read MOVT operand
  uint32_t imm = immW | (immT << 16);
  v += imm;                   // add the immediate offset
  applyMOV(off, v);           // set MOVW operand
  applyMOV(off + 4, v >> 16); // set MOVT operand
}

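// Encode a signed 21-bit offset into a Thumb-2 conditional branch (B<c>.W).
// The offset bits are scattered over the S, J1, J2, imm6 and imm11 fields of
// the two halfwords.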
static void applyBranch20T(uint8_t *off, int32_t v) {
  if (!isInt<21>(v))
    error("relocation out of range");
  uint32_t s = v < 0 ? 1 : 0;
  uint32_t j1 = (v >> 19) & 1;
  uint32_t j2 = (v >> 18) & 1;
  or16(off, (s << 10) | ((v >> 12) & 0x3f));
  or16(off + 2, (j1 << 13) | (j2 << 11) | ((v >> 1) & 0x7ff));
}

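// Encode a signed 25-bit offset into a Thumb-2 B.W/BL(X). Unlike the 20-bit
// form above, the J1/J2 bits hold offset bits 23 and 22 inverted and XORed
// with the sign bit.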
void applyBranch24T(uint8_t *off, int32_t v) {
  if (!isInt<25>(v))
    error("relocation out of range");
  uint32_t s = v < 0 ? 1 : 0;
  uint32_t j1 = ((~v >> 23) & 1) ^ s;
  uint32_t j2 = ((~v >> 22) & 1) ^ s;
  or16(off, (s << 10) | ((v >> 12) & 0x3ff));
  // Clear out the J1 and J2 bits which may be set.
  write16le(off + 2, (read16le(off + 2) & 0xd000) | (j1 << 13) | (j2 << 11) | ((v >> 1) & 0x7ff));
}

void SectionChunk::applyRelARM(uint8_t *off, uint16_t type, OutputSection *os,
                               uint64_t s, uint64_t p) const {
  // Pointer to thumb code must have the LSB set.
  uint64_t sx = s;
  if (os && (os->header.Characteristics & IMAGE_SCN_MEM_EXECUTE))
    sx |= 1;
  switch (type) {
  case IMAGE_REL_ARM_ADDR32:    add32(off, sx + config->imageBase); break;
  case IMAGE_REL_ARM_ADDR32NB:  add32(off, sx); break;
  case IMAGE_REL_ARM_MOV32T:    applyMOV32T(off, sx + config->imageBase); break;
  case IMAGE_REL_ARM_BRANCH20T: applyBranch20T(off, sx - p - 4); break;
  case IMAGE_REL_ARM_BRANCH24T: applyBranch24T(off, sx - p - 4); break;
  case IMAGE_REL_ARM_BLX23T:    applyBranch24T(off, sx - p - 4); break;
  case IMAGE_REL_ARM_SECTION:   applySecIdx(off, os); break;
  case IMAGE_REL_ARM_SECREL:    applySecRel(this, off, os, s); break;
  case IMAGE_REL_ARM_REL32:     add32(off, sx - p - 4); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
          toString(file));
  }
}

// Interpret the existing immediate value as a byte offset to the
// target symbol, then update the instruction with the immediate as
// the page offset from the current instruction to the target.
void applyArm64Addr(uint8_t *off, uint64_t s, uint64_t p, int shift) {
  uint32_t orig = read32le(off);
  uint64_t imm = ((orig >> 29) & 0x3) | ((orig >> 3) & 0x1FFFFC);
  s += imm;
  imm = (s >> shift) - (p >> shift);
  uint32_t immLo = (imm & 0x3) << 29;
  uint32_t immHi = (imm & 0x1FFFFC) << 3;
  uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(off, (orig & ~mask) | immLo | immHi);
}

// Update the immediate field in an AArch64 ldr, str, or add instruction.
// Optionally limit the range of the written immediate by one or more bits
// (rangeLimit).
void applyArm64Imm(uint8_t *off, uint64_t imm, uint32_t rangeLimit) {
  uint32_t orig = read32le(off);
  imm += (orig >> 10) & 0xFFF;
  orig &= ~(0xFFF << 10);
  write32le(off, orig | ((imm & (0xFFF >> rangeLimit)) << 10));
}

// Add the 12 bit page offset to the existing immediate.
// Ldr/str instructions store the opcode immediate scaled
// by the load/store size (giving a larger range for larger
// loads/stores). The immediate is always (both before and after
// fixing up the relocation) stored scaled similarly.
// Even if larger loads/stores have a larger range, limit the
// effective offset to 12 bits, since it is intended to be a
// page offset.
static void applyArm64Ldr(uint8_t *off, uint64_t imm) {
  uint32_t orig = read32le(off);
  uint32_t size = orig >> 30;
  // 0x04000000 indicates SIMD/FP registers
  // 0x00800000 indicates 128 bit
  if ((orig & 0x4800000) == 0x4800000)
    size += 4;
  if ((imm & ((1 << size) - 1)) != 0)
    error("misaligned ldr/str offset");
  applyArm64Imm(off, imm >> size, size);
}

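// SECREL_LOW12A: write the low 12 bits of the target's section-relative
// offset into the immediate field of an ADD-style instruction.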
static void applySecRelLow12A(const SectionChunk *sec, uint8_t *off,
                              OutputSection *os, uint64_t s) {
  if (checkSecRel(sec, os))
    applyArm64Imm(off, (s - os->getRVA()) & 0xfff, 0);
}

static void applySecRelHigh12A(const SectionChunk *sec, uint8_t *off,
                               OutputSection *os, uint64_t s) {
  if (!checkSecRel(sec, os))
    return;
  uint64_t secRel = (s - os->getRVA()) >> 12;
  if (0xfff < secRel) {
    error("overflow in SECREL_HIGH12A relocation in section: " +
          sec->getSectionName());
    return;
  }
  applyArm64Imm(off, secRel & 0xfff, 0);
}

static void applySecRelLdr(const SectionChunk *sec, uint8_t *off,
                           OutputSection *os, uint64_t s) {
  if (checkSecRel(sec, os))
    applyArm64Ldr(off, (s - os->getRVA()) & 0xfff);
}

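// Encode a signed, 4-byte-aligned offset of up to +/-128 MB into the 26-bit
// immediate field of an AArch64 B/BL instruction.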
void applyArm64Branch26(uint8_t *off, int64_t v) {
  if (!isInt<28>(v))
    error("relocation out of range");
  or32(off, (v & 0x0FFFFFFC) >> 2);
}

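// Encode a signed, 4-byte-aligned offset into the 19-bit immediate field
// (bits 23:5) used by conditional and compare-and-branch instructions.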
static void applyArm64Branch19(uint8_t *off, int64_t v) {
  if (!isInt<21>(v))
    error("relocation out of range");
  or32(off, (v & 0x001FFFFC) << 3);
}

static void applyArm64Branch14(uint8_t *off, int64_t v) {
  if (!isInt<16>(v))
    error("relocation out of range");
  or32(off, (v & 0x0000FFFC) << 3);
}

void SectionChunk::applyRelARM64(uint8_t *off, uint16_t type, OutputSection *os,
                                 uint64_t s, uint64_t p) const {
  switch (type) {
  case IMAGE_REL_ARM64_PAGEBASE_REL21: applyArm64Addr(off, s, p, 12); break;
  case IMAGE_REL_ARM64_REL21:          applyArm64Addr(off, s, p, 0); break;
  case IMAGE_REL_ARM64_PAGEOFFSET_12A: applyArm64Imm(off, s & 0xfff, 0); break;
  case IMAGE_REL_ARM64_PAGEOFFSET_12L: applyArm64Ldr(off, s & 0xfff); break;
  case IMAGE_REL_ARM64_BRANCH26:       applyArm64Branch26(off, s - p); break;
  case IMAGE_REL_ARM64_BRANCH19:       applyArm64Branch19(off, s - p); break;
  case IMAGE_REL_ARM64_BRANCH14:       applyArm64Branch14(off, s - p); break;
  case IMAGE_REL_ARM64_ADDR32:         add32(off, s + config->imageBase); break;
  case IMAGE_REL_ARM64_ADDR32NB:       add32(off, s); break;
  case IMAGE_REL_ARM64_ADDR64:         add64(off, s + config->imageBase); break;
  case IMAGE_REL_ARM64_SECREL:         applySecRel(this, off, os, s); break;
  case IMAGE_REL_ARM64_SECREL_LOW12A:  applySecRelLow12A(this, off, os, s); break;
  case IMAGE_REL_ARM64_SECREL_HIGH12A: applySecRelHigh12A(this, off, os, s); break;
  case IMAGE_REL_ARM64_SECREL_LOW12L:  applySecRelLdr(this, off, os, s); break;
  case IMAGE_REL_ARM64_SECTION:        applySecIdx(off, os); break;
  case IMAGE_REL_ARM64_REL32:          add32(off, s - p - 4); break;
  default:
    error("unsupported relocation type 0x" + Twine::utohexstr(type) + " in " +
          toString(file));
  }
}

static void maybeReportRelocationToDiscarded(const SectionChunk *fromChunk,
                                             Defined *sym,
                                             const coff_relocation &rel) {
  // Don't report these errors when the relocation comes from a debug info
  // section or in mingw mode. MinGW mode object files (built by GCC) can
  // have leftover sections with relocations against discarded comdat
  // sections. Such sections are left as is, with relocations untouched.
  if (fromChunk->isCodeView() || fromChunk->isDWARF() || config->mingw)
    return;

  // Get the name of the symbol. If it's null, it was discarded early, so we
  // have to go back to the object file.
  ObjFile *file = fromChunk->file;
  StringRef name;
  if (sym) {
    name = sym->getName();
  } else {
    COFFSymbolRef coffSym =
        check(file->getCOFFObj()->getSymbol(rel.SymbolTableIndex));
    name = check(file->getCOFFObj()->getSymbolName(coffSym));
  }

  std::vector<std::string> symbolLocations =
      getSymbolLocations(file, rel.SymbolTableIndex);

  std::string out;
  llvm::raw_string_ostream os(out);
  os << "relocation against symbol in discarded section: " + name;
  for (const std::string &s : symbolLocations)
    os << s;
  error(os.str());
}

void SectionChunk::writeTo(uint8_t *buf) const {
  if (!hasData)
    return;
  // Copy section contents from source object file to output file.
  ArrayRef<uint8_t> a = getContents();
  if (!a.empty())
    memcpy(buf, a.data(), a.size());

  // Apply relocations.
  size_t inputSize = getSize();
  for (const coff_relocation &rel : getRelocs()) {
    // Check for an invalid relocation offset. This check isn't perfect, because
    // we don't have the relocation size, which is only known after checking the
    // machine and relocation type. As a result, a relocation may overwrite the
    // beginning of the following input section.
    if (rel.VirtualAddress >= inputSize) {
      error("relocation points beyond the end of its parent section");
      continue;
    }

    applyRelocation(buf + rel.VirtualAddress, rel);
  }
}

void SectionChunk::applyRelocation(uint8_t *off,
                                   const coff_relocation &rel) const {
  auto *sym = dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));

  // Get the output section of the symbol for this relocation.  The output
  // section is needed to compute SECREL and SECTION relocations used in debug
  // info.
  Chunk *c = sym ? sym->getChunk() : nullptr;
  OutputSection *os = c ? c->getOutputSection() : nullptr;

  // Skip the relocation if it refers to a discarded section, and diagnose it
  // as an error if appropriate. If a symbol was discarded early, it may be
  // null. If it was discarded late, the output section will be null, unless
  // it was an absolute or synthetic symbol.
  if (!sym ||
      (!os && !isa<DefinedAbsolute>(sym) && !isa<DefinedSynthetic>(sym))) {
    maybeReportRelocationToDiscarded(this, sym, rel);
    return;
  }

  uint64_t s = sym->getRVA();

  // Compute the RVA of the relocation for relative relocations.
  uint64_t p = rva + rel.VirtualAddress;
  switch (config->machine) {
  case AMD64:
    applyRelX64(off, rel.Type, os, s, p);
    break;
  case I386:
    applyRelX86(off, rel.Type, os, s, p);
    break;
  case ARMNT:
    applyRelARM(off, rel.Type, os, s, p);
    break;
  case ARM64:
    applyRelARM64(off, rel.Type, os, s, p);
    break;
  default:
    llvm_unreachable("unknown machine type");
  }
}

// Defend against unsorted relocations. This may be overly conservative.
void SectionChunk::sortRelocations() {
  auto cmpByVa = [](const coff_relocation &l, const coff_relocation &r) {
    return l.VirtualAddress < r.VirtualAddress;
  };
  if (llvm::is_sorted(getRelocs(), cmpByVa))
    return;
  warn("some relocations in " + file->getName() + " are not sorted");
  MutableArrayRef<coff_relocation> newRelocs(
      bAlloc.Allocate<coff_relocation>(relocsSize), relocsSize);
  memcpy(newRelocs.data(), relocsData, relocsSize * sizeof(coff_relocation));
  llvm::sort(newRelocs, cmpByVa);
  setRelocs(newRelocs);
}

// Similar to writeTo, but suitable for relocating a subsection of the overall
// section.
void SectionChunk::writeAndRelocateSubsection(ArrayRef<uint8_t> sec,
                                              ArrayRef<uint8_t> subsec,
                                              uint32_t &nextRelocIndex,
                                              uint8_t *buf) const {
  assert(!subsec.empty() && !sec.empty());
  assert(sec.begin() <= subsec.begin() && subsec.end() <= sec.end() &&
         "subsection is not part of this section");
  size_t vaBegin = std::distance(sec.begin(), subsec.begin());
  size_t vaEnd = std::distance(sec.begin(), subsec.end());
  memcpy(buf, subsec.data(), subsec.size());
  for (; nextRelocIndex < relocsSize; ++nextRelocIndex) {
    const coff_relocation &rel = relocsData[nextRelocIndex];
    // Only apply relocations that apply to this subsection. These checks
    // assume that all subsections completely contain their relocations.
    // Relocations must not straddle the beginning or end of a subsection.
    if (rel.VirtualAddress < vaBegin)
      continue;
    if (rel.VirtualAddress + 1 >= vaEnd)
      break;
    applyRelocation(&buf[rel.VirtualAddress - vaBegin], rel);
  }
}

void SectionChunk::addAssociative(SectionChunk *child) {
  // Insert the child section into the list of associated children. Keep the
  // list ordered by section name so that ICF does not depend on section order.
  assert(child->assocChildren == nullptr &&
         "associated sections cannot have their own associated children");
  SectionChunk *prev = this;
  SectionChunk *next = assocChildren;
  for (; next != nullptr; prev = next, next = next->assocChildren) {
    if (next->getSectionName() <= child->getSectionName())
      break;
  }

  // Insert child between prev and next.
  assert(prev->assocChildren == next);
  prev->assocChildren = child;
  child->assocChildren = next;
}

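// Map a machine relocation to the base relocation type (if any) that the
// loader must apply when the image is not loaded at its preferred address.
// Only relocations that embed an absolute virtual address need a .reloc
// entry; everything else maps to IMAGE_REL_BASED_ABSOLUTE, which
// SectionChunk::getBaserels() below skips.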
static uint8_t getBaserelType(const coff_relocation &rel) {
  switch (config->machine) {
  case AMD64:
    if (rel.Type == IMAGE_REL_AMD64_ADDR64)
      return IMAGE_REL_BASED_DIR64;
    if (rel.Type == IMAGE_REL_AMD64_ADDR32)
      return IMAGE_REL_BASED_HIGHLOW;
    return IMAGE_REL_BASED_ABSOLUTE;
  case I386:
    if (rel.Type == IMAGE_REL_I386_DIR32)
      return IMAGE_REL_BASED_HIGHLOW;
    return IMAGE_REL_BASED_ABSOLUTE;
  case ARMNT:
    if (rel.Type == IMAGE_REL_ARM_ADDR32)
      return IMAGE_REL_BASED_HIGHLOW;
    if (rel.Type == IMAGE_REL_ARM_MOV32T)
      return IMAGE_REL_BASED_ARM_MOV32T;
    return IMAGE_REL_BASED_ABSOLUTE;
  case ARM64:
    if (rel.Type == IMAGE_REL_ARM64_ADDR64)
      return IMAGE_REL_BASED_DIR64;
    return IMAGE_REL_BASED_ABSOLUTE;
  default:
    llvm_unreachable("unknown machine type");
  }
}

// Windows-specific.
// Collect all locations that contain absolute addresses, which need to be
// fixed by the loader if load-time relocation is needed.
// Only called when base relocation is enabled.
void SectionChunk::getBaserels(std::vector<Baserel> *res) {
  for (const coff_relocation &rel : getRelocs()) {
    uint8_t ty = getBaserelType(rel);
    if (ty == IMAGE_REL_BASED_ABSOLUTE)
      continue;
    Symbol *target = file->getSymbol(rel.SymbolTableIndex);
    if (!target || isa<DefinedAbsolute>(target))
      continue;
    res->emplace_back(rva + rel.VirtualAddress, ty);
  }
}

// MinGW specific.
// Check whether a static relocation of type Type can be deferred and
// handled at runtime as a pseudo relocation (for references to a module
// local variable, which turned out to actually need to be imported from
// another DLL). This returns the size the relocation is supposed to update,
// in bits, or 0 if the relocation cannot be handled as a runtime pseudo
// relocation.
static int getRuntimePseudoRelocSize(uint16_t type) {
  // Only relocations that contain an absolute address or a plain relative
  // offset can be handled, since the runtime pseudo reloc implementation
  // adds 8/16/32/64 bit values to a memory address.
  //
  // Given a pseudo relocation entry,
  //
  // typedef struct {
  //   DWORD sym;
  //   DWORD target;
  //   DWORD flags;
  // } runtime_pseudo_reloc_item_v2;
  //
  // the runtime relocation performs this adjustment:
  //     *(base + .target) += *(base + .sym) - (base + .sym)
  //
  // This works for both absolute addresses (IMAGE_REL_*_ADDR32/64,
  // IMAGE_REL_I386_DIR32, where the memory location initially contains
  // the address of the IAT slot), and for relative addresses (IMAGE_REL*_REL32),
  // where the memory location originally contains the relative offset to the
  // IAT slot.
  //
  // This requires the target address to be writable, either directly out of
  // the image, or temporarily changed at runtime with VirtualProtect.
  // Since this only operates on direct address values, it doesn't work for
  // ARM/ARM64 relocations, other than the plain ADDR32/ADDR64 relocations.
  switch (config->machine) {
  case AMD64:
    switch (type) {
    case IMAGE_REL_AMD64_ADDR64:
      return 64;
    case IMAGE_REL_AMD64_ADDR32:
    case IMAGE_REL_AMD64_REL32:
    case IMAGE_REL_AMD64_REL32_1:
    case IMAGE_REL_AMD64_REL32_2:
    case IMAGE_REL_AMD64_REL32_3:
    case IMAGE_REL_AMD64_REL32_4:
    case IMAGE_REL_AMD64_REL32_5:
      return 32;
    default:
      return 0;
    }
  case I386:
    switch (type) {
    case IMAGE_REL_I386_DIR32:
    case IMAGE_REL_I386_REL32:
      return 32;
    default:
      return 0;
    }
  case ARMNT:
    switch (type) {
    case IMAGE_REL_ARM_ADDR32:
      return 32;
    default:
      return 0;
    }
  case ARM64:
    switch (type) {
    case IMAGE_REL_ARM64_ADDR64:
      return 64;
    case IMAGE_REL_ARM64_ADDR32:
      return 32;
    default:
      return 0;
    }
  default:
    llvm_unreachable("unknown machine type");
  }
}

// MinGW specific.
// Append information to the provided vector about all relocations that
// need to be handled at runtime as runtime pseudo relocations (references
// to a module local variable, which turned out to actually need to be
// imported from another DLL).
void SectionChunk::getRuntimePseudoRelocs(
    std::vector<RuntimePseudoReloc> &res) {
  for (const coff_relocation &rel : getRelocs()) {
    auto *target =
        dyn_cast_or_null<Defined>(file->getSymbol(rel.SymbolTableIndex));
    if (!target || !target->isRuntimePseudoReloc)
      continue;
    int sizeInBits = getRuntimePseudoRelocSize(rel.Type);
    if (sizeInBits == 0) {
      error("unable to automatically import from " + target->getName() +
            " with relocation type " +
            file->getCOFFObj()->getRelocationTypeName(rel.Type) + " in " +
            toString(file));
      continue;
    }
    // sizeInBits is used to initialize the Flags field; currently no
    // other flags are defined.
    res.emplace_back(
        RuntimePseudoReloc(target, this, rel.VirtualAddress, sizeInBits));
  }
}

bool SectionChunk::isCOMDAT() const {
  return header->Characteristics & IMAGE_SCN_LNK_COMDAT;
}

void SectionChunk::printDiscardedMessage() const {
  // Removed by dead-stripping. If it's removed by ICF, ICF already
  // printed out the name, so don't repeat that here.
  if (sym && this == repl)
    message("Discarded " + sym->getName());
}

StringRef SectionChunk::getDebugName() const {
  if (sym)
    return sym->getName();
  return "";
}

ArrayRef<uint8_t> SectionChunk::getContents() const {
  ArrayRef<uint8_t> a;
  cantFail(file->getCOFFObj()->getSectionContents(header, a));
  return a;
}

ArrayRef<uint8_t> SectionChunk::consumeDebugMagic() {
  assert(isCodeView());
  return consumeDebugMagic(getContents(), getSectionName());
}

ArrayRef<uint8_t> SectionChunk::consumeDebugMagic(ArrayRef<uint8_t> data,
                                                  StringRef sectionName) {
  if (data.empty())
    return {};

  // First 4 bytes are section magic.
  if (data.size() < 4)
    fatal("the section is too short: " + sectionName);

  if (!sectionName.startswith(".debug$"))
    fatal("invalid section: " + sectionName);

  uint32_t magic = support::endian::read32le(data.data());
  uint32_t expectedMagic = sectionName == ".debug$H"
                               ? DEBUG_HASHES_SECTION_MAGIC
                               : DEBUG_SECTION_MAGIC;
  if (magic != expectedMagic) {
    warn("ignoring section " + sectionName + " with unrecognized magic 0x" +
         utohexstr(magic));
    return {};
  }
  return data.slice(4);
}

SectionChunk *SectionChunk::findByName(ArrayRef<SectionChunk *> sections,
                                       StringRef name) {
  for (SectionChunk *c : sections)
    if (c->getSectionName() == name)
      return c;
  return nullptr;
}

void SectionChunk::replace(SectionChunk *other) {
  p2Align = std::max(p2Align, other->p2Align);
  other->repl = repl;
  other->live = false;
}

uint32_t SectionChunk::getSectionNumber() const {
  DataRefImpl r;
  r.p = reinterpret_cast<uintptr_t>(header);
  SectionRef s(r, file->getCOFFObj());
  return s.getIndex() + 1;
}

CommonChunk::CommonChunk(const COFFSymbolRef s) : sym(s) {
  // The value of a common symbol is its size. Align all common symbols smaller
  // than 32 bytes naturally, i.e. round the size up to the next power of two.
  // This is what MSVC link.exe does.
  setAlignment(std::min(32U, uint32_t(PowerOf2Ceil(sym.getValue()))));
  hasData = false;
}

uint32_t CommonChunk::getOutputCharacteristics() const {
  return IMAGE_SCN_CNT_UNINITIALIZED_DATA | IMAGE_SCN_MEM_READ |
         IMAGE_SCN_MEM_WRITE;
}

void StringChunk::writeTo(uint8_t *buf) const {
  memcpy(buf, str.data(), str.size());
  buf[str.size()] = '\0';
}

ImportThunkChunkX64::ImportThunkChunkX64(Defined *s) : ImportThunkChunk(s) {
  // The Intel Optimization Manual says that all branch targets
  // should be 16-byte aligned. The MSVC linker does this too.
  setAlignment(16);
}

void ImportThunkChunkX64::writeTo(uint8_t *buf) const {
  memcpy(buf, importThunkX86, sizeof(importThunkX86));
  // The first two bytes are the JMP opcode. Fill in its operand.
  write32le(buf + 2, impSymbol->getRVA() - rva - getSize());
}

void ImportThunkChunkX86::getBaserels(std::vector<Baserel> *res) {
  res->emplace_back(getRVA() + 2);
}

void ImportThunkChunkX86::writeTo(uint8_t *buf) const {
  memcpy(buf, importThunkX86, sizeof(importThunkX86));
  // The first two bytes are the JMP opcode. Fill in its operand.
  write32le(buf + 2,
            impSymbol->getRVA() + config->imageBase);
}

void ImportThunkChunkARM::getBaserels(std::vector<Baserel> *res) {
  res->emplace_back(getRVA(), IMAGE_REL_BASED_ARM_MOV32T);
}

void ImportThunkChunkARM::writeTo(uint8_t *buf) const {
  memcpy(buf, importThunkARM, sizeof(importThunkARM));
  // Fix mov.w and mov.t operands.
  applyMOV32T(buf, impSymbol->getRVA() + config->imageBase);
}

void ImportThunkChunkARM64::writeTo(uint8_t *buf) const {
  int64_t off = impSymbol->getRVA() & 0xfff;
  memcpy(buf, importThunkARM64, sizeof(importThunkARM64));
  applyArm64Addr(buf, impSymbol->getRVA(), rva, 12);
  applyArm64Ldr(buf + 4, off);
}

// A Thumb2, PIC, non-interworking range extension thunk.
const uint8_t armThunk[] = {
    0x40, 0xf2, 0x00, 0x0c, // P:  movw ip,:lower16:S - (P + (L1-P) + 4)
    0xc0, 0xf2, 0x00, 0x0c, //     movt ip,:upper16:S - (P + (L1-P) + 4)
    0xe7, 0x44,             // L1: add  pc, ip
};

size_t RangeExtensionThunkARM::getSize() const {
  assert(config->machine == ARMNT);
  return sizeof(armThunk);
}

void RangeExtensionThunkARM::writeTo(uint8_t *buf) const {
  assert(config->machine == ARMNT);
  uint64_t offset = target->getRVA() - rva - 12;
  memcpy(buf, armThunk, sizeof(armThunk));
  applyMOV32T(buf, uint32_t(offset));
}

// A position independent ARM64 adrp+add thunk, with a maximum range of
// +/- 4 GB, which is enough for any PE-COFF.
const uint8_t arm64Thunk[] = {
    0x10, 0x00, 0x00, 0x90, // adrp x16, Dest
    0x10, 0x02, 0x00, 0x91, // add  x16, x16, :lo12:Dest
    0x00, 0x02, 0x1f, 0xd6, // br   x16
};

size_t RangeExtensionThunkARM64::getSize() const {
  assert(config->machine == ARM64);
  return sizeof(arm64Thunk);
}

void RangeExtensionThunkARM64::writeTo(uint8_t *buf) const {
  assert(config->machine == ARM64);
  memcpy(buf, arm64Thunk, sizeof(arm64Thunk));
  applyArm64Addr(buf + 0, target->getRVA(), rva, 12);
  applyArm64Imm(buf + 4, target->getRVA() & 0xfff, 0);
}

void LocalImportChunk::getBaserels(std::vector<Baserel> *res) {
  res->emplace_back(getRVA());
}

size_t LocalImportChunk::getSize() const { return config->wordsize; }

void LocalImportChunk::writeTo(uint8_t *buf) const {
  if (config->is64()) {
    write64le(buf, sym->getRVA() + config->imageBase);
  } else {
    write32le(buf, sym->getRVA() + config->imageBase);
  }
}

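// Write the table as a sorted array of little-endian 32-bit RVAs. Sorting
// happens here rather than when the table is built because the chunk RVAs are
// only known once output addresses have been assigned.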
void RVATableChunk::writeTo(uint8_t *buf) const {
  ulittle32_t *begin = reinterpret_cast<ulittle32_t *>(buf);
  size_t cnt = 0;
  for (const ChunkAndOffset &co : syms)
    begin[cnt++] = co.inputChunk->getRVA() + co.offset;
  std::sort(begin, begin + cnt);
  assert(std::unique(begin, begin + cnt) == begin + cnt &&
         "RVA tables should be de-duplicated");
}

void RVAFlagTableChunk::writeTo(uint8_t *buf) const {
  struct RVAFlag {
    ulittle32_t rva;
    uint8_t flag;
  };
  auto flags =
      makeMutableArrayRef(reinterpret_cast<RVAFlag *>(buf), syms.size());
  for (auto t : zip(syms, flags)) {
    const auto &sym = std::get<0>(t);
    auto &flag = std::get<1>(t);
    flag.rva = sym.inputChunk->getRVA() + sym.offset;
    flag.flag = 0;
  }
  llvm::sort(flags,
             [](const RVAFlag &a, const RVAFlag &b) { return a.rva < b.rva; });
  assert(llvm::unique(flags, [](const RVAFlag &a,
                                const RVAFlag &b) { return a.rva == b.rva; }) ==
             flags.end() &&
         "RVA tables should be de-duplicated");
}

// MinGW specific, for the "automatic import of variables from DLLs" feature.
size_t PseudoRelocTableChunk::getSize() const {
  if (relocs.empty())
    return 0;
  return 12 + 12 * relocs.size();
}

// MinGW specific.
void PseudoRelocTableChunk::writeTo(uint8_t *buf) const {
  if (relocs.empty())
    return;

  ulittle32_t *table = reinterpret_cast<ulittle32_t *>(buf);
  // This is the list header, to signal the runtime pseudo relocation v2
  // format.
  table[0] = 0;
  table[1] = 0;
  table[2] = 1;

  size_t idx = 3;
  for (const RuntimePseudoReloc &rpr : relocs) {
    table[idx + 0] = rpr.sym->getRVA();
    table[idx + 1] = rpr.target->getRVA() + rpr.targetOffset;
    table[idx + 2] = rpr.flags;
    idx += 3;
  }
}

// Windows-specific. This class represents a block in the .reloc section.
// The format is described below.
//
// On Windows, each DLL is linked against a fixed base address and
// usually loaded to that address. However, if there's already another
// DLL that overlaps, the loader has to relocate it. To do that, DLLs
// contain .reloc sections which contain offsets that need to be fixed
// up at runtime. If the loader finds that a DLL cannot be loaded to its
// desired base address, it loads it to somewhere else, and adds <actual
// base address> - <desired base address> to each offset that is
// specified by the .reloc section. In ELF terms, .reloc sections
// contain relative relocations in REL format (as opposed to RELA.)
//
// This already significantly reduces the size of relocations compared
// to ELF .rel.dyn, but Windows does more to reduce it (probably because
// it was invented for PCs in the late '80s or early '90s.)  Offsets in
// .reloc are grouped by page where the page size is 12 bits, and
// offsets sharing the same page address are stored consecutively to
// represent them with less space. This is very similar to the page
// table which is grouped by (multiple stages of) pages.
//
// For example, let's say we have 0x00030, 0x00500, 0x00700, 0x00A00,
// 0x20004, and 0x20008 in a .reloc section for x64. The uppermost 4
// bits of each entry hold the type, IMAGE_REL_BASED_DIR64 (0xA). In the
// section, they are represented like this:
//
//   0x00000  -- page address (4 bytes)
//   16       -- size of this block (4 bytes)
//     0xA030 -- entries (2 bytes each)
//     0xA500
//     0xA700
//     0xAA00
//   0x20000  -- page address (4 bytes)
//   12       -- size of this block (4 bytes)
//     0xA004 -- entries (2 bytes each)
//     0xA008
//
// Usually we have a lot of relocations for each page, so the number of
// bytes for one .reloc entry is close to 2 bytes on average.
BaserelChunk::BaserelChunk(uint32_t page, Baserel *begin, Baserel *end) {
  // Block header consists of 4 byte page RVA and 4 byte block size.
  // Each entry is 2 bytes. The last entry may be padding.
  data.resize(alignTo((end - begin) * 2 + 8, 4));
  uint8_t *p = data.data();
  write32le(p, page);
  write32le(p + 4, data.size());
  p += 8;
  for (Baserel *i = begin; i != end; ++i) {
    write16le(p, (i->type << 12) | (i->rva - page));
    p += 2;
  }
}

void BaserelChunk::writeTo(uint8_t *buf) const {
  memcpy(buf, data.data(), data.size());
}

uint8_t Baserel::getDefaultType() {
  switch (config->machine) {
  case AMD64:
  case ARM64:
    return IMAGE_REL_BASED_DIR64;
  case I386:
  case ARMNT:
    return IMAGE_REL_BASED_HIGHLOW;
  default:
    llvm_unreachable("unknown machine type");
  }
}

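// There is at most one MergeChunk per section alignment (indexed by the log2
// of that alignment), so all mergeable sections sharing an alignment are
// string-merged into the same chunk.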
MergeChunk *MergeChunk::instances[Log2MaxSectionAlignment + 1] = {};

MergeChunk::MergeChunk(uint32_t alignment)
    : builder(StringTableBuilder::RAW, alignment) {
  setAlignment(alignment);
}

void MergeChunk::addSection(SectionChunk *c) {
  assert(isPowerOf2_32(c->getAlignment()));
  uint8_t p2Align = llvm::Log2_32(c->getAlignment());
  assert(p2Align < array_lengthof(instances));
  auto *&mc = instances[p2Align];
  if (!mc)
    mc = make<MergeChunk>(c->getAlignment());
  mc->sections.push_back(c);
}

void MergeChunk::finalizeContents() {
  assert(!finalized && "should only finalize once");
  for (SectionChunk *c : sections)
    if (c->live)
      builder.add(toStringRef(c->getContents()));
  builder.finalize();
  finalized = true;
}

void MergeChunk::assignSubsectionRVAs() {
  for (SectionChunk *c : sections) {
    if (!c->live)
      continue;
    size_t off = builder.getOffset(toStringRef(c->getContents()));
    c->setRVA(rva + off);
  }
}

uint32_t MergeChunk::getOutputCharacteristics() const {
  return IMAGE_SCN_MEM_READ | IMAGE_SCN_CNT_INITIALIZED_DATA;
}

size_t MergeChunk::getSize() const {
  return builder.getSize();
}

void MergeChunk::writeTo(uint8_t *buf) const {
  builder.write(buf);
}

// MinGW specific.
size_t AbsolutePointerChunk::getSize() const { return config->wordsize; }

void AbsolutePointerChunk::writeTo(uint8_t *buf) const {
  if (config->is64()) {
    write64le(buf, value);
  } else {
    write32le(buf, value);
  }
}

} // namespace coff
} // namespace lld