//===--- PatternInit.cpp - Pattern Initialization -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PatternInit.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Type.h"

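// Builds a repeated-byte "pattern" constant for the given LLVM type. This is
// the value used by pattern-based automatic-variable initialization
// (-ftrivial-auto-var-init=pattern) for otherwise-uninitialized storage. For
// example, on a typical 64-bit target an i32 becomes 0xAAAAAAAA, a pointer
// becomes 0xAAAAAAAAAAAAAAAA cast via inttoptr, and a float becomes the
// negative quiet NaN with bit pattern 0xFFFFFFFF.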
llvm::Constant *clang::CodeGen::initializationPatternFor(CodeGenModule &CGM,
                                                         llvm::Type *Ty) {
  // The following value is a guaranteed unmappable pointer value and has a
  // repeated byte-pattern which makes it easier to synthesize. We use it for
  // pointers as well as integers so that aggregates are likely to be
  // initialized with this repeated value.
  // For 32-bit platforms it's a bit trickier because, across systems, only the
  // zero page can reasonably be expected to be unmapped. We use the maximum
  // value 0xFFFFFFFF, assuming that a memory access through such a pointer
  // will wrap around into the (unmapped) zero page.
  const uint64_t IntValue =
      CGM.getContext().getTargetInfo().getMaxPointerWidth() < 64
          ? 0xFFFFFFFFFFFFFFFFull
          : 0xAAAAAAAAAAAAAAAAull;
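  // On targets whose widest pointer is at least 64 bits this repeats the byte
  // 0xAA; on narrower targets every byte of the pattern is 0xFF.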
  // Floating-point values are initialized as NaNs because they propagate. Using
  // a repeated byte pattern means that it will be easier to initialize
  // all-floating-point aggregates and arrays with memset. Further, aggregates
  // which mix integral and a few floats might also initialize with memset
  // followed by a handful of stores for the floats. Using fairly unique NaNs
  // also means they'll be easier to distinguish in a crash.
  constexpr bool NegativeNaN = true;
  constexpr uint64_t NaNPayload = 0xFFFFFFFFFFFFFFFFull;
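  // With the sign bit set and an all-ones payload, an IEEE single becomes the
  // bit pattern 0xFFFFFFFF and a double becomes 0xFFFFFFFFFFFFFFFF, so scalar
  // FP patterns keep the repeated-byte property.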
  if (Ty->isIntOrIntVectorTy()) {
    unsigned BitWidth =
        cast<llvm::IntegerType>(Ty->getScalarType())->getBitWidth();
    if (BitWidth <= 64)
      return llvm::ConstantInt::get(Ty, IntValue);
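    // Wider integers (e.g. i128) get the 64-bit value splatted across their
    // full bit width so every byte still carries the same pattern.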
    return llvm::ConstantInt::get(
        Ty, llvm::APInt::getSplat(BitWidth, llvm::APInt(64, IntValue)));
  }
  if (Ty->isPtrOrPtrVectorTy()) {
    auto *PtrTy = cast<llvm::PointerType>(Ty->getScalarType());
    unsigned PtrWidth =
        CGM.getDataLayout().getPointerSizeInBits(PtrTy->getAddressSpace());
    if (PtrWidth > 64)
      llvm_unreachable("pattern initialization of unsupported pointer width");
    llvm::Type *IntTy = llvm::IntegerType::get(CGM.getLLVMContext(), PtrWidth);
    auto *Int = llvm::ConstantInt::get(IntTy, IntValue);
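    // Materialize the pattern as an integer of pointer width, then convert it
    // with a constant inttoptr so the result has the requested pointer type.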
    return llvm::ConstantExpr::getIntToPtr(Int, PtrTy);
  }
  if (Ty->isFPOrFPVectorTy()) {
    unsigned BitWidth = llvm::APFloat::semanticsSizeInBits(
        Ty->getScalarType()->getFltSemantics());
    llvm::APInt Payload(64, NaNPayload);
    if (BitWidth >= 64)
      Payload = llvm::APInt::getSplat(BitWidth, Payload);
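    // getQNaN fits the payload to the format's significand and, for vector
    // types, returns a splat of the scalar NaN.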
    return llvm::ConstantFP::getQNaN(Ty, NegativeNaN, &Payload);
  }
  if (Ty->isArrayTy()) {
    // Note: this doesn't touch tail padding (at the end of an object, before
    // the next array object). It is instead handled by replaceUndef.
    auto *ArrTy = cast<llvm::ArrayType>(Ty);
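    // Build the element pattern once and replicate it across every element of
    // the array.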
    llvm::SmallVector<llvm::Constant *, 8> Element(
        ArrTy->getNumElements(),
        initializationPatternFor(CGM, ArrTy->getElementType()));
    return llvm::ConstantArray::get(ArrTy, Element);
  }

  // Note: this doesn't touch struct padding. It will initialize as much union
  // padding as is required for the largest type in the union. Padding is
  // instead handled by replaceUndef. Stores to structs with volatile members
  // don't have a volatile qualifier when initialized according to C++. This is
  // fine because stack-based volatiles don't really have volatile semantics
  // anyway, and the initialization shouldn't be observable.
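  // For example, on a typical 64-bit target 'struct { int i; float f; }'
  // lowers to { i32 0xAAAAAAAA, float <negative quiet NaN, bits 0xFFFFFFFF> }.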
  auto *StructTy = cast<llvm::StructType>(Ty);
  llvm::SmallVector<llvm::Constant *, 8> Struct(StructTy->getNumElements());
  for (unsigned El = 0; El != Struct.size(); ++El)
    Struct[El] = initializationPatternFor(CGM, StructTy->getElementType(El));
  return llvm::ConstantStruct::get(StructTy, Struct);
}