1 //===- CIRGenModule.cpp - Per-Module state for CIR generation -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This is the internal per-translation-unit state used for CIR translation.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "CIRGenModule.h"
14 #include "CIRGenCXXABI.h"
15 #include "CIRGenConstantEmitter.h"
16 #include "CIRGenFunction.h"
17
18 #include "clang/AST/ASTContext.h"
19 #include "clang/AST/DeclBase.h"
20 #include "clang/AST/DeclOpenACC.h"
21 #include "clang/AST/GlobalDecl.h"
22 #include "clang/AST/RecordLayout.h"
23 #include "clang/Basic/SourceManager.h"
24 #include "clang/CIR/Dialect/IR/CIRDialect.h"
25 #include "clang/CIR/Interfaces/CIROpInterfaces.h"
26 #include "clang/CIR/MissingFeatures.h"
27
28 #include "CIRGenFunctionInfo.h"
29 #include "mlir/IR/BuiltinOps.h"
30 #include "mlir/IR/Location.h"
31 #include "mlir/IR/MLIRContext.h"
32 #include "mlir/IR/Verifier.h"
33
34 using namespace clang;
35 using namespace clang::CIRGen;
36
/// Create the C++ ABI helper object used by this module for ABI-specific
/// lowering decisions. Only the Itanium family of ABIs is implemented so
/// far; every other ABI kind reports a not-yet-implemented error.
static CIRGenCXXABI *createCXXABI(CIRGenModule &cgm) {
  switch (cgm.getASTContext().getCXXABIKind()) {
  // These ABI kinds are all variants of the Itanium C++ ABI.
  case TargetCXXABI::GenericItanium:
  case TargetCXXABI::GenericAArch64:
  case TargetCXXABI::AppleARM64:
    return CreateCIRGenItaniumCXXABI(cgm);

  case TargetCXXABI::Fuchsia:
  case TargetCXXABI::GenericARM:
  case TargetCXXABI::iOS:
  case TargetCXXABI::WatchOS:
  case TargetCXXABI::GenericMIPS:
  case TargetCXXABI::WebAssembly:
  case TargetCXXABI::XL:
  case TargetCXXABI::Microsoft:
    // Not supported yet: callers receive a null ABI object after the
    // diagnostic is emitted.
    cgm.errorNYI("C++ ABI kind not yet implemented");
    return nullptr;
  }

  // The switch above covers every enumerator; reaching here means the enum
  // grew without this function being updated.
  llvm_unreachable("invalid C++ ABI kind");
}
58
CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
                           clang::ASTContext &astContext,
                           const clang::CodeGenOptions &cgo,
                           DiagnosticsEngine &diags)
    : builder(mlirContext, *this), astContext(astContext),
      langOpts(astContext.getLangOpts()), codeGenOpts(cgo),
      theModule{mlir::ModuleOp::create(mlir::UnknownLoc::get(&mlirContext))},
      diags(diags), target(astContext.getTargetInfo()),
      abi(createCXXABI(*this)), genTypes(*this) {

  // Initialize cached types
  VoidTy = cir::VoidType::get(&getMLIRContext());
  VoidPtrTy = cir::PointerType::get(VoidTy);
  SInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/true);
  SInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/true);
  SInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/true);
  SInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/true);
  SInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/true);
  UInt8Ty = cir::IntType::get(&getMLIRContext(), 8, /*isSigned=*/false);
  UInt16Ty = cir::IntType::get(&getMLIRContext(), 16, /*isSigned=*/false);
  UInt32Ty = cir::IntType::get(&getMLIRContext(), 32, /*isSigned=*/false);
  UInt64Ty = cir::IntType::get(&getMLIRContext(), 64, /*isSigned=*/false);
  UInt128Ty = cir::IntType::get(&getMLIRContext(), 128, /*isSigned=*/false);
  FP16Ty = cir::FP16Type::get(&getMLIRContext());
  BFloat16Ty = cir::BF16Type::get(&getMLIRContext());
  FloatTy = cir::SingleType::get(&getMLIRContext());
  DoubleTy = cir::DoubleType::get(&getMLIRContext());
  FP80Ty = cir::FP80Type::get(&getMLIRContext());
  FP128Ty = cir::FP128Type::get(&getMLIRContext());

  // Cache the target's default-address-space pointer alignment, in bytes.
  PointerAlignInBytes =
      astContext
          .toCharUnitsFromBits(
              astContext.getTargetInfo().getPointerAlign(LangAS::Default))
          .getQuantity();

  // TODO(CIR): Should be updated once TypeSizeInfoAttr is upstreamed
  const unsigned sizeTypeSize =
      astContext.getTypeSize(astContext.getSignedSizeType());
  SizeAlignInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
  // In CIRGenTypeCache, UIntPtrTy and SizeType are fields of the same union
  UIntPtrTy =
      cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/false);
  PtrDiffTy =
      cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/true);

  // Record the target triple on the module so later passes can query it.
  theModule->setAttr(cir::CIRDialect::getTripleAttrName(),
                     builder.getStringAttr(getTriple().str()));

  // Propagate optimization level/size settings when optimizing at all.
  if (cgo.OptimizationLevel > 0 || cgo.OptimizeSize > 0)
    theModule->setAttr(cir::CIRDialect::getOptInfoAttrName(),
                       cir::OptInfoAttr::get(&mlirContext,
                                             cgo.OptimizationLevel,
                                             cgo.OptimizeSize));
}
114
// Defaulted out of line; nothing to do here beyond member cleanup.
CIRGenModule::~CIRGenModule() = default;
116
117 /// FIXME: this could likely be a common helper and not necessarily related
118 /// with codegen.
119 /// Return the best known alignment for an unknown pointer to a
120 /// particular class.
getClassPointerAlignment(const CXXRecordDecl * rd)121 CharUnits CIRGenModule::getClassPointerAlignment(const CXXRecordDecl *rd) {
122 if (!rd->hasDefinition())
123 return CharUnits::One(); // Hopefully won't be used anywhere.
124
125 auto &layout = astContext.getASTRecordLayout(rd);
126
127 // If the class is final, then we know that the pointer points to an
128 // object of that type and can use the full alignment.
129 if (rd->isEffectivelyFinal())
130 return layout.getAlignment();
131
132 // Otherwise, we have to assume it could be a subclass.
133 return layout.getNonVirtualAlignment();
134 }
135
/// Compute the "natural" alignment of \p t: the alignment an arbitrary
/// pointer to this type may be assumed to have when nothing more specific is
/// known. If \p baseInfo is non-null it receives the source of that
/// alignment (attributed typedef vs. plain type information).
CharUnits CIRGenModule::getNaturalTypeAlignment(QualType t,
                                                LValueBaseInfo *baseInfo) {
  assert(!cir::MissingFeatures::opTBAA());

  // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown, but
  // that doesn't return the information we need to compute baseInfo.

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (const auto *tt = t->getAs<TypedefType>()) {
    if (unsigned align = tt->getDecl()->getMaxAlignment()) {
      if (baseInfo)
        *baseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return astContext.toCharUnitsFromBits(align);
    }
  }

  // Analyze the base element type, so we don't get confused by incomplete
  // array types.
  t = astContext.getBaseElementType(t);

  if (t->isIncompleteType()) {
    // We could try to replicate the logic from
    // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
    // type is incomplete, so it's impossible to test. We could try to reuse
    // getTypeAlignIfKnown, but that doesn't return the information we need
    // to set baseInfo. So just ignore the possibility that the alignment is
    // greater than one.
    if (baseInfo)
      *baseInfo = LValueBaseInfo(AlignmentSource::Type);
    return CharUnits::One();
  }

  if (baseInfo)
    *baseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits alignment;
  if (t.getQualifiers().hasUnaligned()) {
    // An __unaligned-qualified type makes no alignment guarantee at all.
    alignment = CharUnits::One();
  } else {
    assert(!cir::MissingFeatures::alignCXXRecordDecl());
    alignment = astContext.getTypeAlignInChars(t);
  }

  // Cap to the global maximum type alignment unless the alignment
  // was somehow explicit on the type.
  if (unsigned maxAlign = astContext.getLangOpts().MaxTypeAlign) {
    if (alignment.getQuantity() > maxAlign &&
        !astContext.isAlignmentRequired(t))
      alignment = CharUnits::fromQuantity(maxAlign);
  }
  return alignment;
}
190
/// Lazily create and cache the target-specific CIRGen info for the current
/// target triple. Architectures/OSes without their own implementation
/// currently fall through to the x86_64 Linux one.
const TargetCIRGenInfo &CIRGenModule::getTargetCIRGenInfo() {
  // Return the cached object if we've already built it.
  if (theTargetCIRGenInfo)
    return *theTargetCIRGenInfo;

  const llvm::Triple &triple = getTarget().getTriple();
  switch (triple.getArch()) {
  default:
    assert(!cir::MissingFeatures::targetCIRGenInfoArch());

    // Currently we just fall through to x86_64.
    [[fallthrough]];

  case llvm::Triple::x86_64: {
    switch (triple.getOS()) {
    default:
      assert(!cir::MissingFeatures::targetCIRGenInfoOS());

      // Currently we just fall through to x86_64.
      [[fallthrough]];

    case llvm::Triple::Linux:
      theTargetCIRGenInfo = createX8664TargetCIRGenInfo(genTypes);
      return *theTargetCIRGenInfo;
    }
  }
  }
}
218
getLoc(SourceLocation cLoc)219 mlir::Location CIRGenModule::getLoc(SourceLocation cLoc) {
220 assert(cLoc.isValid() && "expected valid source location");
221 const SourceManager &sm = astContext.getSourceManager();
222 PresumedLoc pLoc = sm.getPresumedLoc(cLoc);
223 StringRef filename = pLoc.getFilename();
224 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
225 pLoc.getLine(), pLoc.getColumn());
226 }
227
getLoc(SourceRange cRange)228 mlir::Location CIRGenModule::getLoc(SourceRange cRange) {
229 assert(cRange.isValid() && "expected a valid source range");
230 mlir::Location begin = getLoc(cRange.getBegin());
231 mlir::Location end = getLoc(cRange.getEnd());
232 mlir::Attribute metadata;
233 return mlir::FusedLoc::get({begin, end}, metadata, builder.getContext());
234 }
235
/// Return the operation holding the address of the given global declaration,
/// dispatching to structor-, method-, function-, or variable-specific
/// handling as appropriate.
mlir::Operation *
CIRGenModule::getAddrOfGlobal(GlobalDecl gd, ForDefinition_t isForDefinition) {
  const Decl *d = gd.getDecl();

  // C++ constructors/destructors go through the structor-aware path, which
  // understands their multiple variants.
  if (isa<CXXConstructorDecl>(d) || isa<CXXDestructorDecl>(d))
    return getAddrOfCXXStructor(gd, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
                                /*DontDefer=*/false, isForDefinition);

  // Other C++ methods need their calling convention arranged as a method.
  if (isa<CXXMethodDecl>(d)) {
    const CIRGenFunctionInfo &fi =
        getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(d));
    cir::FuncType ty = getTypes().getFunctionType(fi);
    return getAddrOfFunction(gd, ty, /*ForVTable=*/false, /*DontDefer=*/false,
                             isForDefinition);
  }

  // Free functions.
  if (isa<FunctionDecl>(d)) {
    const CIRGenFunctionInfo &fi = getTypes().arrangeGlobalDeclaration(gd);
    cir::FuncType ty = getTypes().getFunctionType(fi);
    return getAddrOfFunction(gd, ty, /*ForVTable=*/false, /*DontDefer=*/false,
                             isForDefinition);
  }

  // Anything else must be a variable.
  return getAddrOfGlobalVar(cast<VarDecl>(d), /*ty=*/nullptr, isForDefinition)
      .getDefiningOp();
}
262
/// Emit the definition for a single (typically deferred) global declaration,
/// unless it has already been emitted.
void CIRGenModule::emitGlobalDecl(const clang::GlobalDecl &d) {
  // We call getAddrOfGlobal with isForDefinition set to ForDefinition in
  // order to get a Value with exactly the type we need, not something that
  // might have been created for another decl with the same mangled name but
  // different type.
  mlir::Operation *op = getAddrOfGlobal(d, ForDefinition);

  // In case of different address spaces, we may still get a cast, even with
  // IsForDefinition equal to ForDefinition. Query mangled names table to get
  // GlobalValue.
  if (!op)
    op = getGlobalValue(getMangledName(d));

  assert(op && "expected a valid global op");

  // Check to see if we've already emitted this. This is necessary for a
  // couple of reasons: first, decls can end up in deferred-decls queue
  // multiple times, and second, decls can end up with definitions in unusual
  // ways (e.g. by an extern inline function acquiring a strong function
  // redefinition). Just ignore those cases.
  // TODO: Not sure what to map this to for MLIR
  // A GetGlobalOp only references the symbol; resolve it to the symbol's
  // defining op before checking whether it is already a definition.
  mlir::Operation *globalValueOp = op;
  if (auto gv = dyn_cast<cir::GetGlobalOp>(op))
    globalValueOp =
        mlir::SymbolTable::lookupSymbolIn(getModule(), gv.getNameAttr());

  if (auto cirGlobalValue =
          dyn_cast<cir::CIRGlobalValueInterface>(globalValueOp))
    if (!cirGlobalValue.isDeclaration())
      return;

  // If this is OpenMP, check if it is legal to emit this global normally.
  assert(!cir::MissingFeatures::openMP());

  // Otherwise, emit the definition and move on to the next one.
  emitGlobalDefinition(d, op);
}
300
/// Drain the deferred-declarations worklist, emitting each entry. Emission
/// may schedule more deferred decls, which are handled recursively so that
/// related declarations end up emitted close together.
void CIRGenModule::emitDeferred() {
  // Emit code for any potentially referenced deferred decls. Since a previously
  // unused static decl may become used during the generation of code for a
  // static function, iterate until no changes are made.

  assert(!cir::MissingFeatures::openMP());
  assert(!cir::MissingFeatures::deferredVtables());
  assert(!cir::MissingFeatures::cudaSupport());

  // Stop if we're out of both deferred vtables and deferred declarations.
  if (deferredDeclsToEmit.empty())
    return;

  // Grab the list of decls to emit. If emitGlobalDefinition schedules more
  // work, it will not interfere with this.
  std::vector<GlobalDecl> curDeclsToEmit;
  curDeclsToEmit.swap(deferredDeclsToEmit);

  for (const GlobalDecl &d : curDeclsToEmit) {
    emitGlobalDecl(d);

    // If we found out that we need to emit more decls, do that recursively.
    // This has the advantage that the decls are emitted in a DFS and related
    // ones are close together, which is convenient for testing.
    if (!deferredDeclsToEmit.empty()) {
      emitDeferred();
      assert(deferredDeclsToEmit.empty());
    }
  }
}
331
/// Decide how to handle the given global declaration: emit it now, defer it
/// until first use, or ignore it.
void CIRGenModule::emitGlobal(clang::GlobalDecl gd) {
  // OpenACC construct decls have their own emission path.
  if (const auto *cd = dyn_cast<clang::OpenACCConstructDecl>(gd.getDecl())) {
    emitGlobalOpenACCDecl(cd);
    return;
  }

  const auto *global = cast<ValueDecl>(gd.getDecl());

  if (const auto *fd = dyn_cast<FunctionDecl>(global)) {
    // Update deferred annotations with the latest declaration if the function
    // was already used or defined.
    if (fd->hasAttr<AnnotateAttr>())
      errorNYI(fd->getSourceRange(), "deferredAnnotations");
    if (!fd->doesThisDeclarationHaveABody()) {
      // A bodiless declaration is only interesting if something (e.g. an
      // attribute) forces an externally visible definition.
      if (!fd->doesDeclarationForceExternallyVisibleDefinition())
        return;

      errorNYI(fd->getSourceRange(),
               "function declaration that forces code gen");
      return;
    }
  } else {
    const auto *vd = cast<VarDecl>(global);
    assert(vd->isFileVarDecl() && "Cannot emit local var decl as global.");
    if (vd->isThisDeclarationADefinition() != VarDecl::Definition &&
        !astContext.isMSStaticDataMemberInlineDefinition(vd)) {
      assert(!cir::MissingFeatures::openMP());
      // If this declaration may have caused an inline variable definition to
      // change linkage, make sure that it's emitted.
      if (astContext.getInlineVariableDefinitionKind(vd) ==
          ASTContext::InlineVariableDefinitionKind::Strong)
        getAddrOfGlobalVar(vd);
      // Otherwise, we can ignore this declaration. The variable will be emitted
      // on its first use.
      return;
    }
  }

  // Defer code generation to first use when possible, e.g. if this is an inline
  // function. If the global must always be emitted, do it eagerly if possible
  // to benefit from cache locality. Deferring code generation is necessary to
  // avoid adding initializers to external declarations.
  if (mustBeEmitted(global) && mayBeEmittedEagerly(global)) {
    // Emit the definition if it can't be deferred.
    emitGlobalDefinition(gd);
    return;
  }

  // If we're deferring emission of a C++ variable with an initializer, remember
  // the order in which it appeared on the file.
  assert(!cir::MissingFeatures::deferredCXXGlobalInit());

  llvm::StringRef mangledName = getMangledName(gd);
  if (getGlobalValue(mangledName) != nullptr) {
    // The value has already been used and should therefore be emitted.
    addDeferredDeclToEmit(gd);
  } else if (mustBeEmitted(global)) {
    // The value must be emitted, but cannot be emitted eagerly.
    assert(!mayBeEmittedEagerly(global));
    addDeferredDeclToEmit(gd);
  } else {
    // Otherwise, remember that we saw a deferred decl with this name. The first
    // use of the mangled name will cause it to move into deferredDeclsToEmit.
    deferredDecls[mangledName] = gd;
  }
}
398
/// Emit CIR for the body of function \p gd. \p op, when non-null, is an
/// operation already created for this mangled name; it is reused only if its
/// function type matches the arranged one.
void CIRGenModule::emitGlobalFunctionDefinition(clang::GlobalDecl gd,
                                                mlir::Operation *op) {
  auto const *funcDecl = cast<FunctionDecl>(gd.getDecl());
  const CIRGenFunctionInfo &fi = getTypes().arrangeGlobalDeclaration(gd);
  cir::FuncType funcType = getTypes().getFunctionType(fi);
  cir::FuncOp funcOp = dyn_cast_if_present<cir::FuncOp>(op);
  if (!funcOp || funcOp.getFunctionType() != funcType) {
    // The existing op (if any) has the wrong type; get/create one with the
    // exact type needed for this definition.
    funcOp = getAddrOfFunction(gd, funcType, /*ForVTable=*/false,
                               /*DontDefer=*/true, ForDefinition);
  }

  // Already emitted.
  if (!funcOp.isDeclaration())
    return;

  setFunctionLinkage(gd, funcOp);
  setGVProperties(funcOp, funcDecl);
  assert(!cir::MissingFeatures::opFuncMaybeHandleStaticInExternC());
  maybeSetTrivialComdat(*funcDecl, funcOp);
  assert(!cir::MissingFeatures::setLLVMFunctionFEnvAttributes());

  // Track the function currently being generated while its body is emitted;
  // the insertion guard restores the builder's position afterwards.
  CIRGenFunction cgf(*this, builder);
  curCGF = &cgf;
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    cgf.generateCode(gd, funcOp, funcType);
  }
  curCGF = nullptr;

  setNonAliasAttributes(gd, funcOp);
  assert(!cir::MissingFeatures::opFuncAttributesForDefinition());

  // Attributes below are recognized but not implemented yet.
  if (funcDecl->getAttr<ConstructorAttr>())
    errorNYI(funcDecl->getSourceRange(), "constructor attribute");
  if (funcDecl->getAttr<DestructorAttr>())
    errorNYI(funcDecl->getSourceRange(), "destructor attribute");

  if (funcDecl->getAttr<AnnotateAttr>())
    errorNYI(funcDecl->getSourceRange(), "deferredAnnotations");
}
439
/// Look up an operation with the given symbol name in the module's symbol
/// table; returns null if no such global/function has been created yet.
mlir::Operation *CIRGenModule::getGlobalValue(StringRef name) {
  return mlir::SymbolTable::lookupSymbolIn(theModule, name);
}
443
/// Create a new cir.global operation named \p name with type \p t at \p loc.
/// It is inserted either at \p insertPoint (when replacing an existing
/// global) or grouped with the other globals at the top of the module.
cir::GlobalOp CIRGenModule::createGlobalOp(CIRGenModule &cgm,
                                           mlir::Location loc, StringRef name,
                                           mlir::Type t,
                                           mlir::Operation *insertPoint) {
  cir::GlobalOp g;
  CIRGenBuilderTy &builder = cgm.getBuilder();

  {
    // Scope the guard so the builder's previous insertion point is restored
    // before returning.
    mlir::OpBuilder::InsertionGuard guard(builder);

    // If an insertion point is provided, we're replacing an existing global,
    // otherwise, create the new global immediately after the last global we
    // emitted.
    if (insertPoint) {
      builder.setInsertionPoint(insertPoint);
    } else {
      // Group global operations together at the top of the module.
      if (cgm.lastGlobalOp)
        builder.setInsertionPointAfter(cgm.lastGlobalOp);
      else
        builder.setInsertionPointToStart(cgm.getModule().getBody());
    }

    g = builder.create<cir::GlobalOp>(loc, name, t);
    // Only track this as the last global when it was appended to the group,
    // not when it replaced an existing op.
    if (!insertPoint)
      cgm.lastGlobalOp = g;

    // Default to private until we can judge based on the initializer,
    // since MLIR doesn't allow public declarations.
    mlir::SymbolTable::setSymbolVisibility(
        g, mlir::SymbolTable::Visibility::Private);
  }
  return g;
}
478
setCommonAttributes(GlobalDecl gd,mlir::Operation * gv)479 void CIRGenModule::setCommonAttributes(GlobalDecl gd, mlir::Operation *gv) {
480 const Decl *d = gd.getDecl();
481 if (isa_and_nonnull<NamedDecl>(d))
482 setGVProperties(gv, dyn_cast<NamedDecl>(d));
483 assert(!cir::MissingFeatures::defaultVisibility());
484 assert(!cir::MissingFeatures::opGlobalUsedOrCompilerUsed());
485 }
486
/// Set attributes that apply to all non-alias globals (functions and
/// variables alike). Beyond the common attributes, the remaining pieces are
/// still tracked as missing features.
void CIRGenModule::setNonAliasAttributes(GlobalDecl gd, mlir::Operation *op) {
  setCommonAttributes(gd, op);

  assert(!cir::MissingFeatures::opGlobalUsedOrCompilerUsed());
  assert(!cir::MissingFeatures::opGlobalSection());
  assert(!cir::MissingFeatures::opFuncCPUAndFeaturesAttributes());
  assert(!cir::MissingFeatures::opFuncSection());

  assert(!cir::MissingFeatures::setTargetAttributes());
}
497
setLinkageForGV(cir::GlobalOp & gv,const NamedDecl * nd)498 static void setLinkageForGV(cir::GlobalOp &gv, const NamedDecl *nd) {
499 // Set linkage and visibility in case we never see a definition.
500 LinkageInfo lv = nd->getLinkageAndVisibility();
501 // Don't set internal linkage on declarations.
502 // "extern_weak" is overloaded in LLVM; we probably should have
503 // separate linkage types for this.
504 if (isExternallyVisible(lv.getLinkage()) &&
505 (nd->hasAttr<WeakAttr>() || nd->isWeakImported()))
506 gv.setLinkage(cir::GlobalLinkageKind::ExternalWeakLinkage);
507 }
508
509 /// If the specified mangled name is not in the module,
510 /// create and return an mlir GlobalOp with the specified type (TODO(cir):
511 /// address space).
512 ///
513 /// TODO(cir):
514 /// 1. If there is something in the module with the specified name, return
515 /// it potentially bitcasted to the right type.
516 ///
517 /// 2. If \p d is non-null, it specifies a decl that correspond to this. This
518 /// is used to set the attributes on the global when it is first created.
519 ///
520 /// 3. If \p isForDefinition is true, it is guaranteed that an actual global
521 /// with type \p ty will be returned, not conversion of a variable with the same
522 /// mangled name but some other type.
523 cir::GlobalOp
getOrCreateCIRGlobal(StringRef mangledName,mlir::Type ty,LangAS langAS,const VarDecl * d,ForDefinition_t isForDefinition)524 CIRGenModule::getOrCreateCIRGlobal(StringRef mangledName, mlir::Type ty,
525 LangAS langAS, const VarDecl *d,
526 ForDefinition_t isForDefinition) {
527 // Lookup the entry, lazily creating it if necessary.
528 cir::GlobalOp entry;
529 if (mlir::Operation *v = getGlobalValue(mangledName)) {
530 if (!isa<cir::GlobalOp>(v))
531 errorNYI(d->getSourceRange(), "global with non-GlobalOp type");
532 entry = cast<cir::GlobalOp>(v);
533 }
534
535 if (entry) {
536 assert(!cir::MissingFeatures::addressSpace());
537 assert(!cir::MissingFeatures::opGlobalWeakRef());
538
539 assert(!cir::MissingFeatures::setDLLStorageClass());
540 assert(!cir::MissingFeatures::openMP());
541
542 if (entry.getSymType() == ty)
543 return entry;
544
545 // If there are two attempts to define the same mangled name, issue an
546 // error.
547 //
548 // TODO(cir): look at mlir::GlobalValue::isDeclaration for all aspects of
549 // recognizing the global as a declaration, for now only check if
550 // initializer is present.
551 if (isForDefinition && !entry.isDeclaration()) {
552 errorNYI(d->getSourceRange(), "global with conflicting type");
553 }
554
555 // Address space check removed because it is unnecessary because CIR records
556 // address space info in types.
557
558 // (If global is requested for a definition, we always need to create a new
559 // global, not just return a bitcast.)
560 if (!isForDefinition)
561 return entry;
562 }
563
564 mlir::Location loc = getLoc(d->getSourceRange());
565
566 // mlir::SymbolTable::Visibility::Public is the default, no need to explicitly
567 // mark it as such.
568 cir::GlobalOp gv =
569 CIRGenModule::createGlobalOp(*this, loc, mangledName, ty,
570 /*insertPoint=*/entry.getOperation());
571
572 // This is the first use or definition of a mangled name. If there is a
573 // deferred decl with this name, remember that we need to emit it at the end
574 // of the file.
575 auto ddi = deferredDecls.find(mangledName);
576 if (ddi != deferredDecls.end()) {
577 // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
578 // list, and remove it from DeferredDecls (since we don't need it anymore).
579 addDeferredDeclToEmit(ddi->second);
580 deferredDecls.erase(ddi);
581 }
582
583 // Handle things which are present even on external declarations.
584 if (d) {
585 if (langOpts.OpenMP && !langOpts.OpenMPSimd)
586 errorNYI(d->getSourceRange(), "OpenMP target global variable");
587
588 gv.setAlignmentAttr(getSize(astContext.getDeclAlign(d)));
589 assert(!cir::MissingFeatures::opGlobalConstant());
590
591 setLinkageForGV(gv, d);
592
593 if (d->getTLSKind())
594 errorNYI(d->getSourceRange(), "thread local global variable");
595
596 setGVProperties(gv, d);
597
598 // If required by the ABI, treat declarations of static data members with
599 // inline initializers as definitions.
600 if (astContext.isMSStaticDataMemberInlineDefinition(d))
601 errorNYI(d->getSourceRange(), "MS static data member inline definition");
602
603 assert(!cir::MissingFeatures::opGlobalSection());
604 gv.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(d));
605
606 // Handle XCore specific ABI requirements.
607 if (getTriple().getArch() == llvm::Triple::xcore)
608 errorNYI(d->getSourceRange(), "XCore specific ABI requirements");
609
610 // Check if we a have a const declaration with an initializer, we may be
611 // able to emit it as available_externally to expose it's value to the
612 // optimizer.
613 if (getLangOpts().CPlusPlus && gv.isPublic() &&
614 d->getType().isConstQualified() && gv.isDeclaration() &&
615 !d->hasDefinition() && d->hasInit() && !d->hasAttr<DLLImportAttr>())
616 errorNYI(d->getSourceRange(),
617 "external const declaration with initializer");
618 }
619
620 return gv;
621 }
622
623 cir::GlobalOp
getOrCreateCIRGlobal(const VarDecl * d,mlir::Type ty,ForDefinition_t isForDefinition)624 CIRGenModule::getOrCreateCIRGlobal(const VarDecl *d, mlir::Type ty,
625 ForDefinition_t isForDefinition) {
626 assert(d->hasGlobalStorage() && "Not a global variable");
627 QualType astTy = d->getType();
628 if (!ty)
629 ty = getTypes().convertTypeForMem(astTy);
630
631 StringRef mangledName = getMangledName(d);
632 return getOrCreateCIRGlobal(mangledName, ty, astTy.getAddressSpace(), d,
633 isForDefinition);
634 }
635
636 /// Return the mlir::Value for the address of the given global variable. If
637 /// \p ty is non-null and if the global doesn't exist, then it will be created
638 /// with the specified type instead of whatever the normal requested type would
639 /// be. If \p isForDefinition is true, it is guaranteed that an actual global
640 /// with type \p ty will be returned, not conversion of a variable with the same
641 /// mangled name but some other type.
getAddrOfGlobalVar(const VarDecl * d,mlir::Type ty,ForDefinition_t isForDefinition)642 mlir::Value CIRGenModule::getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty,
643 ForDefinition_t isForDefinition) {
644 assert(d->hasGlobalStorage() && "Not a global variable");
645 QualType astTy = d->getType();
646 if (!ty)
647 ty = getTypes().convertTypeForMem(astTy);
648
649 assert(!cir::MissingFeatures::opGlobalThreadLocal());
650
651 cir::GlobalOp g = getOrCreateCIRGlobal(d, ty, isForDefinition);
652 mlir::Type ptrTy = builder.getPointerTo(g.getSymType());
653 return builder.create<cir::GetGlobalOp>(getLoc(d->getSourceRange()), ptrTy,
654 g.getSymName());
655 }
656
emitGlobalVarDefinition(const clang::VarDecl * vd,bool isTentative)657 void CIRGenModule::emitGlobalVarDefinition(const clang::VarDecl *vd,
658 bool isTentative) {
659 const QualType astTy = vd->getType();
660
661 if (getLangOpts().OpenCL || getLangOpts().OpenMPIsTargetDevice) {
662 errorNYI(vd->getSourceRange(), "emit OpenCL/OpenMP global variable");
663 return;
664 }
665
666 // Whether the definition of the variable is available externally.
667 // If yes, we shouldn't emit the GloablCtor and GlobalDtor for the variable
668 // since this is the job for its original source.
669 bool isDefinitionAvailableExternally =
670 astContext.GetGVALinkageForVariable(vd) == GVA_AvailableExternally;
671 assert(!cir::MissingFeatures::needsGlobalCtorDtor());
672
673 // It is useless to emit the definition for an available_externally variable
674 // which can't be marked as const.
675 if (isDefinitionAvailableExternally &&
676 (!vd->hasConstantInitialization() ||
677 // TODO: Update this when we have interface to check constexpr
678 // destructor.
679 vd->needsDestruction(astContext) ||
680 !vd->getType().isConstantStorage(astContext, true, true)))
681 return;
682
683 mlir::Attribute init;
684 const VarDecl *initDecl;
685 const Expr *initExpr = vd->getAnyInitializer(initDecl);
686
687 std::optional<ConstantEmitter> emitter;
688
689 assert(!cir::MissingFeatures::cudaSupport());
690
691 if (vd->hasAttr<LoaderUninitializedAttr>()) {
692 errorNYI(vd->getSourceRange(), "loader uninitialized attribute");
693 return;
694 } else if (!initExpr) {
695 // This is a tentative definition; tentative definitions are
696 // implicitly initialized with { 0 }.
697 //
698 // Note that tentative definitions are only emitted at the end of
699 // a translation unit, so they should never have incomplete
700 // type. In addition, EmitTentativeDefinition makes sure that we
701 // never attempt to emit a tentative definition if a real one
702 // exists. A use may still exists, however, so we still may need
703 // to do a RAUW.
704 assert(!astTy->isIncompleteType() && "Unexpected incomplete type");
705 init = builder.getZeroInitAttr(convertType(vd->getType()));
706 } else {
707 emitter.emplace(*this);
708 mlir::Attribute initializer = emitter->tryEmitForInitializer(*initDecl);
709 if (!initializer) {
710 QualType qt = initExpr->getType();
711 if (vd->getType()->isReferenceType())
712 qt = vd->getType();
713
714 if (getLangOpts().CPlusPlus) {
715 if (initDecl->hasFlexibleArrayInit(astContext))
716 errorNYI(vd->getSourceRange(), "flexible array initializer");
717 init = builder.getZeroInitAttr(convertType(qt));
718 if (astContext.GetGVALinkageForVariable(vd) != GVA_AvailableExternally)
719 errorNYI(vd->getSourceRange(), "global constructor");
720 } else {
721 errorNYI(vd->getSourceRange(), "static initializer");
722 }
723 } else {
724 init = initializer;
725 // We don't need an initializer, so remove the entry for the delayed
726 // initializer position (just in case this entry was delayed) if we
727 // also don't need to register a destructor.
728 if (vd->needsDestruction(astContext) == QualType::DK_cxx_destructor)
729 errorNYI(vd->getSourceRange(), "delayed destructor");
730 }
731 }
732
733 mlir::Type initType;
734 if (mlir::isa<mlir::SymbolRefAttr>(init)) {
735 errorNYI(vd->getSourceRange(), "global initializer is a symbol reference");
736 return;
737 } else {
738 assert(mlir::isa<mlir::TypedAttr>(init) && "This should have a type");
739 auto typedInitAttr = mlir::cast<mlir::TypedAttr>(init);
740 initType = typedInitAttr.getType();
741 }
742 assert(!mlir::isa<mlir::NoneType>(initType) && "Should have a type by now");
743
744 cir::GlobalOp gv =
745 getOrCreateCIRGlobal(vd, initType, ForDefinition_t(!isTentative));
746 // TODO(cir): Strip off pointer casts from Entry if we get them?
747
748 if (!gv || gv.getSymType() != initType) {
749 errorNYI(vd->getSourceRange(), "global initializer with type mismatch");
750 return;
751 }
752
753 assert(!cir::MissingFeatures::maybeHandleStaticInExternC());
754
755 if (vd->hasAttr<AnnotateAttr>()) {
756 errorNYI(vd->getSourceRange(), "annotate global variable");
757 }
758
759 if (langOpts.CUDA) {
760 errorNYI(vd->getSourceRange(), "CUDA global variable");
761 }
762
763 // Set initializer and finalize emission
764 CIRGenModule::setInitializer(gv, init);
765 if (emitter)
766 emitter->finalize(gv);
767
768 // Set CIR's linkage type as appropriate.
769 cir::GlobalLinkageKind linkage =
770 getCIRLinkageVarDefinition(vd, /*IsConstant=*/false);
771
772 // Set CIR linkage and DLL storage class.
773 gv.setLinkage(linkage);
774 // FIXME(cir): setLinkage should likely set MLIR's visibility automatically.
775 gv.setVisibility(getMLIRVisibilityFromCIRLinkage(linkage));
776 assert(!cir::MissingFeatures::opGlobalDLLImportExport());
777 if (linkage == cir::GlobalLinkageKind::CommonLinkage)
778 errorNYI(initExpr->getSourceRange(), "common linkage");
779
780 setNonAliasAttributes(vd, gv);
781
782 assert(!cir::MissingFeatures::opGlobalThreadLocal());
783
784 maybeSetTrivialComdat(*vd, gv);
785 }
786
/// Emit the definition for \p gd, dispatching to structor-, method-,
/// function-, or variable-specific emission. \p op, when non-null, is an
/// operation already created for this mangled name.
void CIRGenModule::emitGlobalDefinition(clang::GlobalDecl gd,
                                        mlir::Operation *op) {
  const auto *decl = cast<ValueDecl>(gd.getDecl());
  if (const auto *fd = dyn_cast<FunctionDecl>(decl)) {
    // TODO(CIR): Skip generation of CIR for functions with available_externally
    // linkage at -O0.

    if (const auto *method = dyn_cast<CXXMethodDecl>(decl)) {
      // Make sure to emit the definition(s) before we emit the thunks. This is
      // necessary for the generation of certain thunks.
      if (isa<CXXConstructorDecl>(method) || isa<CXXDestructorDecl>(method))
        abi->emitCXXStructor(gd);
      else if (fd->isMultiVersion())
        errorNYI(method->getSourceRange(), "multiversion functions");
      else
        emitGlobalFunctionDefinition(gd, op);

      if (method->isVirtual())
        errorNYI(method->getSourceRange(), "virtual member function");

      return;
    }

    if (fd->isMultiVersion())
      errorNYI(fd->getSourceRange(), "multiversion functions");
    emitGlobalFunctionDefinition(gd, op);
    return;
  }

  if (const auto *vd = dyn_cast<VarDecl>(decl))
    // A variable without a definition here is treated as tentative.
    return emitGlobalVarDefinition(vd, !vd->hasDefinition());

  llvm_unreachable("Invalid argument to CIRGenModule::emitGlobalDefinition");
}
821
822 mlir::Attribute
getConstantArrayFromStringLiteral(const StringLiteral * e)823 CIRGenModule::getConstantArrayFromStringLiteral(const StringLiteral *e) {
824 assert(!e->getType()->isPointerType() && "Strings are always arrays");
825
826 // Don't emit it as the address of the string, emit the string data itself
827 // as an inline array.
828 if (e->getCharByteWidth() == 1) {
829 SmallString<64> str(e->getString());
830
831 // Resize the string to the right size, which is indicated by its type.
832 const ConstantArrayType *cat =
833 astContext.getAsConstantArrayType(e->getType());
834 uint64_t finalSize = cat->getZExtSize();
835 str.resize(finalSize);
836
837 mlir::Type eltTy = convertType(cat->getElementType());
838 return builder.getString(str, eltTy, finalSize);
839 }
840
841 errorNYI(e->getSourceRange(),
842 "getConstantArrayFromStringLiteral: wide characters");
843 return mlir::Attribute();
844 }
845
supportsCOMDAT() const846 bool CIRGenModule::supportsCOMDAT() const {
847 return getTriple().supportsCOMDAT();
848 }
849
shouldBeInCOMDAT(CIRGenModule & cgm,const Decl & d)850 static bool shouldBeInCOMDAT(CIRGenModule &cgm, const Decl &d) {
851 if (!cgm.supportsCOMDAT())
852 return false;
853
854 if (d.hasAttr<SelectAnyAttr>())
855 return true;
856
857 GVALinkage linkage;
858 if (auto *vd = dyn_cast<VarDecl>(&d))
859 linkage = cgm.getASTContext().GetGVALinkageForVariable(vd);
860 else
861 linkage =
862 cgm.getASTContext().GetGVALinkageForFunction(cast<FunctionDecl>(&d));
863
864 switch (linkage) {
865 case clang::GVA_Internal:
866 case clang::GVA_AvailableExternally:
867 case clang::GVA_StrongExternal:
868 return false;
869 case clang::GVA_DiscardableODR:
870 case clang::GVA_StrongODR:
871 return true;
872 }
873 llvm_unreachable("No such linkage");
874 }
875
maybeSetTrivialComdat(const Decl & d,mlir::Operation * op)876 void CIRGenModule::maybeSetTrivialComdat(const Decl &d, mlir::Operation *op) {
877 if (!shouldBeInCOMDAT(*this, d))
878 return;
879 if (auto globalOp = dyn_cast_or_null<cir::GlobalOp>(op)) {
880 globalOp.setComdat(true);
881 } else {
882 auto funcOp = cast<cir::FuncOp>(op);
883 funcOp.setComdat(true);
884 }
885 }
886
/// Notification that the tag type \p td has just been completed; forwards to
/// the type cache so any CIR types created while it was incomplete are fixed.
void CIRGenModule::updateCompletedType(const TagDecl *td) {
  // Make sure that this type is translated.
  genTypes.updateCompletedType(td);
}
891
/// Record that the global named \p name should later be replaced by \p op.
/// The actual rewrite happens in applyReplacements(); a later call with the
/// same name overwrites the earlier entry.
void CIRGenModule::addReplacement(StringRef name, mlir::Operation *op) {
  replacements[name] = op;
}
895
replacePointerTypeArgs(cir::FuncOp oldF,cir::FuncOp newF)896 void CIRGenModule::replacePointerTypeArgs(cir::FuncOp oldF, cir::FuncOp newF) {
897 std::optional<mlir::SymbolTable::UseRange> optionalUseRange =
898 oldF.getSymbolUses(theModule);
899 if (!optionalUseRange)
900 return;
901
902 for (const mlir::SymbolTable::SymbolUse &u : *optionalUseRange) {
903 // CallTryOp only shows up after FlattenCFG.
904 auto call = mlir::dyn_cast<cir::CallOp>(u.getUser());
905 if (!call)
906 continue;
907
908 for (const auto [argOp, fnArgType] :
909 llvm::zip(call.getArgs(), newF.getFunctionType().getInputs())) {
910 if (argOp.getType() == fnArgType)
911 continue;
912
913 // The purpose of this entire function is to insert bitcasts in the case
914 // where these types don't match, but I haven't seen a case where that
915 // happens.
916 errorNYI(call.getLoc(), "replace call with mismatched types");
917 }
918 }
919 }
920
applyReplacements()921 void CIRGenModule::applyReplacements() {
922 for (auto &i : replacements) {
923 StringRef mangledName = i.first();
924 mlir::Operation *replacement = i.second;
925 mlir::Operation *entry = getGlobalValue(mangledName);
926 if (!entry)
927 continue;
928 assert(isa<cir::FuncOp>(entry) && "expected function");
929 auto oldF = cast<cir::FuncOp>(entry);
930 auto newF = dyn_cast<cir::FuncOp>(replacement);
931 if (!newF) {
932 // In classic codegen, this can be a global alias, a bitcast, or a GEP.
933 errorNYI(replacement->getLoc(), "replacement is not a function");
934 continue;
935 }
936
937 // LLVM has opaque pointer but CIR not. So we may have to handle these
938 // different pointer types when performing replacement.
939 replacePointerTypeArgs(oldF, newF);
940
941 // Replace old with new, but keep the old order.
942 if (oldF.replaceAllSymbolUses(newF.getSymNameAttr(), theModule).failed())
943 llvm_unreachable("internal error, cannot RAUW symbol");
944 if (newF) {
945 newF->moveBefore(oldF);
946 oldF->erase();
947 }
948 }
949 }
950
951 // TODO(CIR): this could be a common method between LLVM codegen.
/// Return true if the tentative definition \p vd cannot be given C common
/// linkage and must instead be emitted as a strong definition. The checks
/// mirror classic LLVM codegen; their order matters for which reason wins.
static bool isVarDeclStrongDefinition(const ASTContext &astContext,
                                      CIRGenModule &cgm, const VarDecl *vd,
                                      bool noCommon) {
  // Don't give variables common linkage if -fno-common was specified unless it
  // was overridden by a NoCommon attribute.
  if ((noCommon || vd->hasAttr<NoCommonAttr>()) && !vd->hasAttr<CommonAttr>())
    return true;

  // C11 6.9.2/2:
  //   A declaration of an identifier for an object that has file scope without
  //   an initializer, and without a storage-class specifier or with the
  //   storage-class specifier static, constitutes a tentative definition.
  // Anything with an initializer or external storage is not tentative.
  if (vd->getInit() || vd->hasExternalStorage())
    return true;

  // A variable cannot be both common and exist in a section.
  if (vd->hasAttr<SectionAttr>())
    return true;

  // A variable cannot be both common and exist in a section.
  // We don't try to determine which is the right section in the front-end.
  // If no specialized section name is applicable, it will resort to default.
  if (vd->hasAttr<PragmaClangBSSSectionAttr>() ||
      vd->hasAttr<PragmaClangDataSectionAttr>() ||
      vd->hasAttr<PragmaClangRelroSectionAttr>() ||
      vd->hasAttr<PragmaClangRodataSectionAttr>())
    return true;

  // Thread local vars aren't considered common linkage.
  if (vd->getTLSKind())
    return true;

  // Tentative definitions marked with WeakImportAttr are true definitions.
  if (vd->hasAttr<WeakImportAttr>())
    return true;

  // A variable cannot be both common and exist in a comdat.
  if (shouldBeInCOMDAT(cgm, *vd))
    return true;

  // Declarations with a required alignment do not have common linkage in MSVC
  // mode.
  if (astContext.getTargetInfo().getCXXABI().isMicrosoft()) {
    if (vd->hasAttr<AlignedAttr>())
      return true;
    QualType varType = vd->getType();
    if (astContext.isAlignmentRequired(varType))
      return true;

    // Also check each (non-bitfield) field of a record type for a required
    // alignment.
    if (const auto *rt = varType->getAs<RecordType>()) {
      const RecordDecl *rd = rt->getDecl();
      for (const FieldDecl *fd : rd->fields()) {
        if (fd->isBitField())
          continue;
        if (fd->hasAttr<AlignedAttr>())
          return true;
        if (astContext.isAlignmentRequired(fd->getType()))
          return true;
      }
    }
  }

  // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
  // common symbols, so symbols with greater alignment requirements cannot be
  // common.
  // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
  // alignments for common symbols via the aligncomm directive, so this
  // restriction only applies to MSVC environments.
  if (astContext.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
      astContext.getTypeAlignIfKnown(vd->getType()) >
          astContext.toBits(CharUnits::fromQuantity(32)))
    return true;

  return false;
}
1027
/// Map the AST-level GVA linkage of \p dd onto the CIR linkage kind used for
/// its definition. \p isConstantVariable selects the ODR flavor over the
/// "any" flavor for weak symbols. The order of the checks below matters.
cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator(
    const DeclaratorDecl *dd, GVALinkage linkage, bool isConstantVariable) {
  if (linkage == GVA_Internal)
    return cir::GlobalLinkageKind::InternalLinkage;

  // An explicit __attribute__((weak)) overrides the computed linkage.
  if (dd->hasAttr<WeakAttr>()) {
    if (isConstantVariable)
      return cir::GlobalLinkageKind::WeakODRLinkage;
    return cir::GlobalLinkageKind::WeakAnyLinkage;
  }

  if (const auto *fd = dd->getAsFunction())
    if (fd->isMultiVersion() && linkage == GVA_AvailableExternally)
      return cir::GlobalLinkageKind::LinkOnceAnyLinkage;

  // We are guaranteed to have a strong definition somewhere else,
  // so we can use available_externally linkage.
  if (linkage == GVA_AvailableExternally)
    return cir::GlobalLinkageKind::AvailableExternallyLinkage;

  // Note that Apple's kernel linker doesn't support symbol
  // coalescing, so we need to avoid linkonce and weak linkages there.
  // Normally, this means we just map to internal, but for explicit
  // instantiations we'll map to external.

  // In C++, the compiler has to emit a definition in every translation unit
  // that references the function. We should use linkonce_odr because
  // a) if all references in this translation unit are optimized away, we
  // don't need to codegen it. b) if the function persists, it needs to be
  // merged with other definitions. c) C++ has the ODR, so we know the
  // definition is dependable.
  if (linkage == GVA_DiscardableODR)
    return !astContext.getLangOpts().AppleKext
               ? cir::GlobalLinkageKind::LinkOnceODRLinkage
               : cir::GlobalLinkageKind::InternalLinkage;

  // An explicit instantiation of a template has weak linkage, since
  // explicit instantiations can occur in multiple translation units
  // and must all be equivalent. However, we are not allowed to
  // throw away these explicit instantiations.
  //
  // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
  // so say that CUDA templates are either external (for kernels) or internal.
  // This lets llvm perform aggressive inter-procedural optimizations. For
  // -fgpu-rdc case, device function calls across multiple TU's are allowed,
  // therefore we need to follow the normal linkage paradigm.
  if (linkage == GVA_StrongODR) {
    if (getLangOpts().AppleKext)
      return cir::GlobalLinkageKind::ExternalLinkage;
    if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
        !getLangOpts().GPURelocatableDeviceCode)
      return dd->hasAttr<CUDAGlobalAttr>()
                 ? cir::GlobalLinkageKind::ExternalLinkage
                 : cir::GlobalLinkageKind::InternalLinkage;
    return cir::GlobalLinkageKind::WeakODRLinkage;
  }

  // C++ doesn't have tentative definitions and thus cannot have common
  // linkage.
  if (!getLangOpts().CPlusPlus && isa<VarDecl>(dd) &&
      !isVarDeclStrongDefinition(astContext, *this, cast<VarDecl>(dd),
                                 getCodeGenOpts().NoCommon)) {
    errorNYI(dd->getBeginLoc(), "common linkage", dd->getDeclKindName());
    return cir::GlobalLinkageKind::CommonLinkage;
  }

  // selectany symbols are externally visible, so use weak instead of
  // linkonce. MSVC optimizes away references to const selectany globals, so
  // all definitions should be the same and ODR linkage should be used.
  // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
  if (dd->hasAttr<SelectAnyAttr>())
    return cir::GlobalLinkageKind::WeakODRLinkage;

  // Otherwise, we have strong external linkage.
  assert(linkage == GVA_StrongExternal);
  return cir::GlobalLinkageKind::ExternalLinkage;
}
1105
1106 cir::GlobalLinkageKind
getCIRLinkageVarDefinition(const VarDecl * vd,bool isConstant)1107 CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant) {
1108 assert(!isConstant && "constant variables NYI");
1109 GVALinkage linkage = astContext.GetGVALinkageForVariable(vd);
1110 return getCIRLinkageForDeclarator(vd, linkage, isConstant);
1111 }
1112
getFunctionLinkage(GlobalDecl gd)1113 cir::GlobalLinkageKind CIRGenModule::getFunctionLinkage(GlobalDecl gd) {
1114 const auto *d = cast<FunctionDecl>(gd.getDecl());
1115
1116 GVALinkage linkage = astContext.GetGVALinkageForFunction(d);
1117
1118 if (const auto *dtor = dyn_cast<CXXDestructorDecl>(d))
1119 return getCXXABI().getCXXDestructorLinkage(linkage, dtor, gd.getDtorType());
1120
1121 return getCIRLinkageForDeclarator(d, linkage, /*isConstantVariable=*/false);
1122 }
1123
1124 static cir::GlobalOp
generateStringLiteral(mlir::Location loc,mlir::TypedAttr c,cir::GlobalLinkageKind lt,CIRGenModule & cgm,StringRef globalName,CharUnits alignment)1125 generateStringLiteral(mlir::Location loc, mlir::TypedAttr c,
1126 cir::GlobalLinkageKind lt, CIRGenModule &cgm,
1127 StringRef globalName, CharUnits alignment) {
1128 assert(!cir::MissingFeatures::addressSpace());
1129
1130 // Create a global variable for this string
1131 // FIXME(cir): check for insertion point in module level.
1132 cir::GlobalOp gv =
1133 CIRGenModule::createGlobalOp(cgm, loc, globalName, c.getType());
1134
1135 // Set up extra information and add to the module
1136 gv.setAlignmentAttr(cgm.getSize(alignment));
1137 gv.setLinkageAttr(
1138 cir::GlobalLinkageKindAttr::get(cgm.getBuilder().getContext(), lt));
1139 assert(!cir::MissingFeatures::opGlobalThreadLocal());
1140 assert(!cir::MissingFeatures::opGlobalUnnamedAddr());
1141 CIRGenModule::setInitializer(gv, c);
1142 if (gv.isWeakForLinker()) {
1143 assert(cgm.supportsCOMDAT() && "Only COFF uses weak string literals");
1144 gv.setComdat(true);
1145 }
1146 cgm.setDSOLocal(static_cast<mlir::Operation *>(gv));
1147 return gv;
1148 }
1149
1150 // LLVM IR automatically uniques names when new llvm::GlobalVariables are
1151 // created. This is handy, for example, when creating globals for string
1152 // literals. Since we don't do that when creating cir::GlobalOp's, we need
1153 // a mechanism to generate a unique name in advance.
1154 //
1155 // For now, this mechanism is only used in cases where we know that the
1156 // name is compiler-generated, so we don't use the MLIR symbol table for
1157 // the lookup.
getUniqueGlobalName(const std::string & baseName)1158 std::string CIRGenModule::getUniqueGlobalName(const std::string &baseName) {
1159 // If this is the first time we've generated a name for this basename, use
1160 // it as is and start a counter for this base name.
1161 auto it = cgGlobalNames.find(baseName);
1162 if (it == cgGlobalNames.end()) {
1163 cgGlobalNames[baseName] = 1;
1164 return baseName;
1165 }
1166
1167 std::string result =
1168 baseName + "." + std::to_string(cgGlobalNames[baseName]++);
1169 // There should not be any symbol with this name in the module.
1170 assert(!mlir::SymbolTable::lookupSymbolIn(theModule, result));
1171 return result;
1172 }
1173
1174 /// Return a pointer to a constant array for the given string literal.
getGlobalForStringLiteral(const StringLiteral * s,StringRef name)1175 cir::GlobalOp CIRGenModule::getGlobalForStringLiteral(const StringLiteral *s,
1176 StringRef name) {
1177 CharUnits alignment =
1178 astContext.getAlignOfGlobalVarInChars(s->getType(), /*VD=*/nullptr);
1179
1180 mlir::Attribute c = getConstantArrayFromStringLiteral(s);
1181
1182 if (getLangOpts().WritableStrings) {
1183 errorNYI(s->getSourceRange(),
1184 "getGlobalForStringLiteral: Writable strings");
1185 }
1186
1187 // Mangle the string literal if that's how the ABI merges duplicate strings.
1188 // Don't do it if they are writable, since we don't want writes in one TU to
1189 // affect strings in another.
1190 if (getCXXABI().getMangleContext().shouldMangleStringLiteral(s) &&
1191 !getLangOpts().WritableStrings) {
1192 errorNYI(s->getSourceRange(),
1193 "getGlobalForStringLiteral: mangle string literals");
1194 }
1195
1196 // Unlike LLVM IR, CIR doesn't automatically unique names for globals, so
1197 // we need to do that explicitly.
1198 std::string uniqueName = getUniqueGlobalName(name.str());
1199 mlir::Location loc = getLoc(s->getSourceRange());
1200 auto typedC = llvm::cast<mlir::TypedAttr>(c);
1201 cir::GlobalOp gv =
1202 generateStringLiteral(loc, typedC, cir::GlobalLinkageKind::PrivateLinkage,
1203 *this, uniqueName, alignment);
1204 setDSOLocal(static_cast<mlir::Operation *>(gv));
1205
1206 assert(!cir::MissingFeatures::sanitizers());
1207
1208 return gv;
1209 }
1210
emitDeclContext(const DeclContext * dc)1211 void CIRGenModule::emitDeclContext(const DeclContext *dc) {
1212 for (Decl *decl : dc->decls()) {
1213 // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
1214 // are themselves considered "top-level", so EmitTopLevelDecl on an
1215 // ObjCImplDecl does not recursively visit them. We need to do that in
1216 // case they're nested inside another construct (LinkageSpecDecl /
1217 // ExportDecl) that does stop them from being considered "top-level".
1218 if (auto *oid = dyn_cast<ObjCImplDecl>(decl))
1219 errorNYI(oid->getSourceRange(), "emitDeclConext: ObjCImplDecl");
1220
1221 emitTopLevelDecl(decl);
1222 }
1223 }
1224
1225 // Emit code for a single top level declaration.
// Emit code for a single top level declaration.
void CIRGenModule::emitTopLevelDecl(Decl *decl) {

  // Ignore dependent declarations.
  if (decl->isTemplated())
    return;

  switch (decl->getKind()) {
  default:
    // Anything not explicitly handled below is reported as not-yet-implemented
    // rather than silently dropped.
    errorNYI(decl->getBeginLoc(), "declaration of kind",
             decl->getDeclKindName());
    break;

  case Decl::CXXMethod:
  case Decl::Function: {
    auto *fd = cast<FunctionDecl>(decl);
    // Consteval functions shouldn't be emitted.
    if (!fd->isConsteval())
      emitGlobal(fd);
    break;
  }

  case Decl::Var: {
    auto *vd = cast<VarDecl>(decl);
    emitGlobal(vd);
    break;
  }
  case Decl::OpenACCRoutine:
    emitGlobalOpenACCDecl(cast<OpenACCRoutineDecl>(decl));
    break;
  case Decl::OpenACCDeclare:
    emitGlobalOpenACCDecl(cast<OpenACCDeclareDecl>(decl));
    break;
  // Type and using declarations produce no code; only debug info (when
  // implemented) will care about them.
  case Decl::Enum:
  case Decl::Using:          // using X; [C++]
  case Decl::UsingDirective: // using namespace X; [C++]
  case Decl::UsingEnum:      // using enum X; [C++]
  case Decl::NamespaceAlias:
  case Decl::Typedef:
  case Decl::TypeAlias: // using foo = bar; [C++11]
  case Decl::Record:
    assert(!cir::MissingFeatures::generateDebugInfo());
    break;

  // No code generation needed.
  case Decl::UsingShadow:
  case Decl::Empty:
    break;

  // Constructor/destructor emission is delegated to the C++ ABI, which knows
  // which structor variants to produce.
  case Decl::CXXConstructor:
    getCXXABI().emitCXXConstructors(cast<CXXConstructorDecl>(decl));
    break;
  case Decl::CXXDestructor:
    getCXXABI().emitCXXDestructors(cast<CXXDestructorDecl>(decl));
    break;

  // C++ Decls
  case Decl::LinkageSpec:
  case Decl::Namespace:
    // Recurse into the contained declarations.
    emitDeclContext(Decl::castToDeclContext(decl));
    break;

  case Decl::ClassTemplateSpecialization:
  case Decl::CXXRecord:
    assert(!cir::MissingFeatures::generateDebugInfo());
    assert(!cir::MissingFeatures::cxxRecordStaticMembers());
    break;
  }
}
1294
/// Set (or replace) the initial value attribute of the global \p op.
void CIRGenModule::setInitializer(cir::GlobalOp &op, mlir::Attribute value) {
  // Recompute visibility when updating initializer.
  op.setInitialValueAttr(value);
  assert(!cir::MissingFeatures::opGlobalVisibility());
}
1300
getAddrAndTypeOfCXXStructor(GlobalDecl gd,const CIRGenFunctionInfo * fnInfo,cir::FuncType fnType,bool dontDefer,ForDefinition_t isForDefinition)1301 std::pair<cir::FuncType, cir::FuncOp> CIRGenModule::getAddrAndTypeOfCXXStructor(
1302 GlobalDecl gd, const CIRGenFunctionInfo *fnInfo, cir::FuncType fnType,
1303 bool dontDefer, ForDefinition_t isForDefinition) {
1304 auto *md = cast<CXXMethodDecl>(gd.getDecl());
1305
1306 if (isa<CXXDestructorDecl>(md)) {
1307 // Always alias equivalent complete destructors to base destructors in the
1308 // MS ABI.
1309 if (getTarget().getCXXABI().isMicrosoft() &&
1310 gd.getDtorType() == Dtor_Complete &&
1311 md->getParent()->getNumVBases() == 0)
1312 errorNYI(md->getSourceRange(),
1313 "getAddrAndTypeOfCXXStructor: MS ABI complete destructor");
1314 }
1315
1316 if (!fnType) {
1317 if (!fnInfo)
1318 fnInfo = &getTypes().arrangeCXXStructorDeclaration(gd);
1319 fnType = getTypes().getFunctionType(*fnInfo);
1320 }
1321
1322 auto fn = getOrCreateCIRFunction(getMangledName(gd), fnType, gd,
1323 /*ForVtable=*/false, dontDefer,
1324 /*IsThunk=*/false, isForDefinition);
1325
1326 return {fnType, fn};
1327 }
1328
getAddrOfFunction(clang::GlobalDecl gd,mlir::Type funcType,bool forVTable,bool dontDefer,ForDefinition_t isForDefinition)1329 cir::FuncOp CIRGenModule::getAddrOfFunction(clang::GlobalDecl gd,
1330 mlir::Type funcType, bool forVTable,
1331 bool dontDefer,
1332 ForDefinition_t isForDefinition) {
1333 assert(!cast<FunctionDecl>(gd.getDecl())->isConsteval() &&
1334 "consteval function should never be emitted");
1335
1336 if (!funcType) {
1337 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1338 funcType = convertType(fd->getType());
1339 }
1340
1341 // Devirtualized destructor calls may come through here instead of via
1342 // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
1343 // of the complete destructor when necessary.
1344 if (const auto *dd = dyn_cast<CXXDestructorDecl>(gd.getDecl())) {
1345 if (getTarget().getCXXABI().isMicrosoft() &&
1346 gd.getDtorType() == Dtor_Complete &&
1347 dd->getParent()->getNumVBases() == 0)
1348 errorNYI(dd->getSourceRange(),
1349 "getAddrOfFunction: MS ABI complete destructor");
1350 }
1351
1352 StringRef mangledName = getMangledName(gd);
1353 cir::FuncOp func =
1354 getOrCreateCIRFunction(mangledName, funcType, gd, forVTable, dontDefer,
1355 /*isThunk=*/false, isForDefinition);
1356 return func;
1357 }
1358
/// Compute the mangled name for \p gd, using the ABI's mangler when mangling
/// applies and the plain identifier otherwise.
static std::string getMangledNameImpl(CIRGenModule &cgm, GlobalDecl gd,
                                      const NamedDecl *nd) {
  SmallString<256> buffer;

  llvm::raw_svector_ostream out(buffer);
  MangleContext &mc = cgm.getCXXABI().getMangleContext();

  assert(!cir::MissingFeatures::moduleNameHash());

  if (mc.shouldMangleDeclName(nd)) {
    mc.mangleName(gd.getWithDecl(nd), out);
  } else {
    // Unmangled path: the name is the identifier itself, modulo a few
    // special cases that are not yet implemented.
    IdentifierInfo *ii = nd->getIdentifier();
    assert(ii && "Attempt to mangle unnamed decl.");

    const auto *fd = dyn_cast<FunctionDecl>(nd);
    if (fd &&
        fd->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
      cgm.errorNYI(nd->getSourceRange(), "getMangledName: X86RegCall");
    } else if (fd && fd->hasAttr<CUDAGlobalAttr>() &&
               gd.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      cgm.errorNYI(nd->getSourceRange(), "getMangledName: CUDA device stub");
    }
    out << ii->getName();
  }

  // Check if the module name hash should be appended for internal linkage
  // symbols. This should come before multi-version target suffixes are
  // appended. This is to keep the name and module hash suffix of the internal
  // linkage function together. The unique suffix should only be added when name
  // mangling is done to make sure that the final name can be properly
  // demangled. For example, for C functions without prototypes, name mangling
  // is not done and the unique suffix should not be appended then.
  assert(!cir::MissingFeatures::moduleNameHash());

  if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
    if (fd->isMultiVersion()) {
      cgm.errorNYI(nd->getSourceRange(),
                   "getMangledName: multi-version functions");
    }
  }
  if (cgm.getLangOpts().GPURelocatableDeviceCode) {
    cgm.errorNYI(nd->getSourceRange(),
                 "getMangledName: GPU relocatable device code");
  }

  return std::string(out.str());
}
1407
/// Return the (cached) mangled name for \p gd. The returned StringRef points
/// into the module's mangling table, so it stays valid for the module's
/// lifetime.
StringRef CIRGenModule::getMangledName(GlobalDecl gd) {
  GlobalDecl canonicalGd = gd.getCanonicalDecl();

  // Some ABIs don't have constructor variants. Make sure that base and complete
  // constructors get mangled the same.
  if (const auto *cd = dyn_cast<CXXConstructorDecl>(canonicalGd.getDecl())) {
    if (!getTarget().getCXXABI().hasConstructorVariants()) {
      errorNYI(cd->getSourceRange(),
               "getMangledName: C++ constructor without variants");
      return cast<NamedDecl>(gd.getDecl())->getIdentifier()->getName();
    }
  }

  // Keep the first result in the case of a mangling collision.
  const auto *nd = cast<NamedDecl>(gd.getDecl());
  std::string mangledName = getMangledNameImpl(*this, gd, nd);

  // insert() keeps the existing entry on collision, so the first GlobalDecl
  // to claim a name wins; either way we return the table-owned key string.
  auto result = manglings.insert(std::make_pair(mangledName, gd));
  return mangledDeclNames[canonicalGd] = result.first->first();
}
1428
emitTentativeDefinition(const VarDecl * d)1429 void CIRGenModule::emitTentativeDefinition(const VarDecl *d) {
1430 assert(!d->getInit() && "Cannot emit definite definitions here!");
1431
1432 StringRef mangledName = getMangledName(d);
1433 mlir::Operation *gv = getGlobalValue(mangledName);
1434
1435 // If we already have a definition, not declaration, with the same mangled
1436 // name, emitting of declaration is not required (and would actually overwrite
1437 // the emitted definition).
1438 if (gv && !mlir::cast<cir::GlobalOp>(gv).isDeclaration())
1439 return;
1440
1441 // If we have not seen a reference to this variable yet, place it into the
1442 // deferred declarations table to be emitted if needed later.
1443 if (!mustBeEmitted(d) && !gv) {
1444 deferredDecls[mangledName] = d;
1445 return;
1446 }
1447
1448 // The tentative definition is the only definition.
1449 emitGlobalVarDefinition(d);
1450 }
1451
mustBeEmitted(const ValueDecl * global)1452 bool CIRGenModule::mustBeEmitted(const ValueDecl *global) {
1453 // Never defer when EmitAllDecls is specified.
1454 if (langOpts.EmitAllDecls)
1455 return true;
1456
1457 const auto *vd = dyn_cast<VarDecl>(global);
1458 if (vd &&
1459 ((codeGenOpts.KeepPersistentStorageVariables &&
1460 (vd->getStorageDuration() == SD_Static ||
1461 vd->getStorageDuration() == SD_Thread)) ||
1462 (codeGenOpts.KeepStaticConsts && vd->getStorageDuration() == SD_Static &&
1463 vd->getType().isConstQualified())))
1464 return true;
1465
1466 return getASTContext().DeclMustBeEmitted(global);
1467 }
1468
/// Whether it is safe to emit \p global eagerly, i.e. its linkage cannot
/// change later in a way that would make early emission wrong.
bool CIRGenModule::mayBeEmittedEagerly(const ValueDecl *global) {
  // In OpenMP 5.0 variables and function may be marked as
  // device_type(host/nohost) and we should not emit them eagerly unless we sure
  // that they must be emitted on the host/device. To be sure we need to have
  // seen a declare target with an explicit mentioning of the function, we know
  // we have if the level of the declare target attribute is -1. Note that we
  // check somewhere else if we should emit this at all.
  if (langOpts.OpenMP >= 50 && !langOpts.OpenMPSimd) {
    std::optional<OMPDeclareTargetDeclAttr *> activeAttr =
        OMPDeclareTargetDeclAttr::getActiveAttr(global);
    if (!activeAttr || (*activeAttr)->getLevel() != (unsigned)-1)
      return false;
  }

  const auto *fd = dyn_cast<FunctionDecl>(global);
  if (fd) {
    // Implicit template instantiations may change linkage if they are later
    // explicitly instantiated, so they should not be emitted eagerly.
    if (fd->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
      return false;
    // Defer until all versions have been semantically checked.
    if (fd->hasAttr<TargetVersionAttr>() && !fd->isMultiVersion())
      return false;
    if (langOpts.SYCLIsDevice) {
      errorNYI(fd->getSourceRange(), "mayBeEmittedEagerly: SYCL");
      return false;
    }
  }
  const auto *vd = dyn_cast<VarDecl>(global);
  if (vd)
    if (astContext.getInlineVariableDefinitionKind(vd) ==
        ASTContext::InlineVariableDefinitionKind::WeakUnknown)
      // A definition of an inline constexpr static data member may change
      // linkage later if it's redeclared outside the class.
      return false;

  // If OpenMP is enabled and threadprivates must be generated like TLS, delay
  // codegen for global variables, because they may be marked as threadprivate.
  if (langOpts.OpenMP && langOpts.OpenMPUseTLS &&
      astContext.getTargetInfo().isTLSSupported() && isa<VarDecl>(global) &&
      !global->getType().isConstantStorage(astContext, false, false) &&
      !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(global))
    return false;

  assert((fd || vd) &&
         "Only FunctionDecl and VarDecl should hit this path so far.");
  return true;
}
1517
shouldAssumeDSOLocal(const CIRGenModule & cgm,cir::CIRGlobalValueInterface gv)1518 static bool shouldAssumeDSOLocal(const CIRGenModule &cgm,
1519 cir::CIRGlobalValueInterface gv) {
1520 if (gv.hasLocalLinkage())
1521 return true;
1522
1523 if (!gv.hasDefaultVisibility() && !gv.hasExternalWeakLinkage())
1524 return true;
1525
1526 // DLLImport explicitly marks the GV as external.
1527 // so it shouldn't be dso_local
1528 // But we don't have the info set now
1529 assert(!cir::MissingFeatures::opGlobalDLLImportExport());
1530
1531 const llvm::Triple &tt = cgm.getTriple();
1532 const CodeGenOptions &cgOpts = cgm.getCodeGenOpts();
1533 if (tt.isWindowsGNUEnvironment()) {
1534 // In MinGW, variables without DLLImport can still be automatically
1535 // imported from a DLL by the linker; don't mark variables that
1536 // potentially could come from another DLL as DSO local.
1537
1538 // With EmulatedTLS, TLS variables can be autoimported from other DLLs
1539 // (and this actually happens in the public interface of libstdc++), so
1540 // such variables can't be marked as DSO local. (Native TLS variables
1541 // can't be dllimported at all, though.)
1542 cgm.errorNYI("shouldAssumeDSOLocal: MinGW");
1543 }
1544
1545 // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
1546 // remain unresolved in the link, they can be resolved to zero, which is
1547 // outside the current DSO.
1548 if (tt.isOSBinFormatCOFF() && gv.hasExternalWeakLinkage())
1549 return false;
1550
1551 // Every other GV is local on COFF.
1552 // Make an exception for windows OS in the triple: Some firmware builds use
1553 // *-win32-macho triples. This (accidentally?) produced windows relocations
1554 // without GOT tables in older clang versions; Keep this behaviour.
1555 // FIXME: even thread local variables?
1556 if (tt.isOSBinFormatCOFF() || (tt.isOSWindows() && tt.isOSBinFormatMachO()))
1557 return true;
1558
1559 // Only handle COFF and ELF for now.
1560 if (!tt.isOSBinFormatELF())
1561 return false;
1562
1563 llvm::Reloc::Model rm = cgOpts.RelocationModel;
1564 const LangOptions &lOpts = cgm.getLangOpts();
1565 if (rm != llvm::Reloc::Static && !lOpts.PIE) {
1566 // On ELF, if -fno-semantic-interposition is specified and the target
1567 // supports local aliases, there will be neither CC1
1568 // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
1569 // dso_local on the function if using a local alias is preferable (can avoid
1570 // PLT indirection).
1571 if (!(isa<cir::FuncOp>(gv) && gv.canBenefitFromLocalAlias()))
1572 return false;
1573 return !(lOpts.SemanticInterposition || lOpts.HalfNoSemanticInterposition);
1574 }
1575
1576 // A definition cannot be preempted from an executable.
1577 if (!gv.isDeclarationForLinker())
1578 return true;
1579
1580 // Most PIC code sequences that assume that a symbol is local cannot produce a
1581 // 0 if it turns out the symbol is undefined. While this is ABI and relocation
1582 // depended, it seems worth it to handle it here.
1583 if (rm == llvm::Reloc::PIC_ && gv.hasExternalWeakLinkage())
1584 return false;
1585
1586 // PowerPC64 prefers TOC indirection to avoid copy relocations.
1587 if (tt.isPPC64())
1588 return false;
1589
1590 if (cgOpts.DirectAccessExternalData) {
1591 // If -fdirect-access-external-data (default for -fno-pic), set dso_local
1592 // for non-thread-local variables. If the symbol is not defined in the
1593 // executable, a copy relocation will be needed at link time. dso_local is
1594 // excluded for thread-local variables because they generally don't support
1595 // copy relocations.
1596 if (auto globalOp = dyn_cast<cir::GlobalOp>(gv.getOperation())) {
1597 // Assume variables are not thread-local until that support is added.
1598 assert(!cir::MissingFeatures::opGlobalThreadLocal());
1599 return true;
1600 }
1601
1602 // -fno-pic sets dso_local on a function declaration to allow direct
1603 // accesses when taking its address (similar to a data symbol). If the
1604 // function is not defined in the executable, a canonical PLT entry will be
1605 // needed at link time. -fno-direct-access-external-data can avoid the
1606 // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
1607 // it could just cause trouble without providing perceptible benefits.
1608 if (isa<cir::FuncOp>(gv) && !cgOpts.NoPLT && rm == llvm::Reloc::Static)
1609 return true;
1610 }
1611
1612 // If we can use copy relocations we can assume it is local.
1613
1614 // Otherwise don't assume it is local.
1615
1616 return false;
1617 }
1618
/// Set the MLIR visibility on global \p gv based on declaration \p d.
/// Currently a no-op: global visibility support is not yet implemented in
/// CIRGen, which the MissingFeatures assertion below documents.
void CIRGenModule::setGlobalVisibility(mlir::Operation *gv,
                                       const NamedDecl *d) const {
  assert(!cir::MissingFeatures::opGlobalVisibility());
}
1623
setDSOLocal(cir::CIRGlobalValueInterface gv) const1624 void CIRGenModule::setDSOLocal(cir::CIRGlobalValueInterface gv) const {
1625 gv.setDSOLocal(shouldAssumeDSOLocal(*this, gv));
1626 }
1627
setDSOLocal(mlir::Operation * op) const1628 void CIRGenModule::setDSOLocal(mlir::Operation *op) const {
1629 if (auto globalValue = dyn_cast<cir::CIRGlobalValueInterface>(op))
1630 setDSOLocal(globalValue);
1631 }
1632
/// Set visibility and dso_local on global \p op for declaration \p d.
/// DLL import/export storage classes are not implemented yet; the assertion
/// below documents that gap.
void CIRGenModule::setGVProperties(mlir::Operation *op,
                                   const NamedDecl *d) const {
  assert(!cir::MissingFeatures::opGlobalDLLImportExport());
  setGVPropertiesAux(op, d);
}
1638
/// Helper for setGVProperties: applies visibility then dso_local to \p op.
/// Symbol partitions are not implemented yet (see the assertion below).
void CIRGenModule::setGVPropertiesAux(mlir::Operation *op,
                                      const NamedDecl *d) const {
  setGlobalVisibility(op, d);
  setDSOLocal(op);
  assert(!cir::MissingFeatures::opGlobalPartition());
}
1645
setFunctionAttributes(GlobalDecl globalDecl,cir::FuncOp func,bool isIncompleteFunction,bool isThunk)1646 void CIRGenModule::setFunctionAttributes(GlobalDecl globalDecl,
1647 cir::FuncOp func,
1648 bool isIncompleteFunction,
1649 bool isThunk) {
1650 // NOTE(cir): Original CodeGen checks if this is an intrinsic. In CIR we
1651 // represent them in dedicated ops. The correct attributes are ensured during
1652 // translation to LLVM. Thus, we don't need to check for them here.
1653
1654 assert(!cir::MissingFeatures::setFunctionAttributes());
1655 assert(!cir::MissingFeatures::setTargetAttributes());
1656
1657 // TODO(cir): This needs a lot of work to better match CodeGen. That
1658 // ultimately ends up in setGlobalVisibility, which already has the linkage of
1659 // the LLVM GV (corresponding to our FuncOp) computed, so it doesn't have to
1660 // recompute it here. This is a minimal fix for now.
1661 if (!isLocalLinkage(getFunctionLinkage(globalDecl))) {
1662 const Decl *decl = globalDecl.getDecl();
1663 func.setGlobalVisibilityAttr(getGlobalVisibilityAttrFromDecl(decl));
1664 }
1665 }
1666
/// Get or create the cir::FuncOp for \p mangledName with type \p funcType.
///
/// If a global with that name already exists it is reused; several of the
/// pre-existing-global cases (thunks, non-FuncOp globals, duplicate
/// definitions) are not yet implemented and are diagnosed via errorNYI but
/// then handled best-effort. Otherwise a new declaration is created and,
/// unless \p dontDefer is set, any matching deferred definition is scheduled
/// for emission at the end of the translation unit.
cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
    StringRef mangledName, mlir::Type funcType, GlobalDecl gd, bool forVTable,
    bool dontDefer, bool isThunk, ForDefinition_t isForDefinition,
    mlir::ArrayAttr extraAttrs) {
  // NOTE(review): `d` can be null for some GlobalDecls (the cast_or_null
  // below allows it), yet the thunk path dereferences it — presumably thunks
  // always carry a decl; verify against callers.
  const Decl *d = gd.getDecl();

  if (isThunk)
    errorNYI(d->getSourceRange(), "getOrCreateCIRFunction: thunk");

  // In what follows, we continue past 'errorNYI' as if nothing happened because
  // the rest of the implementation is better than doing nothing.

  if (const auto *fd = cast_or_null<FunctionDecl>(d)) {
    // For the device mark the function as one that should be emitted.
    if (getLangOpts().OpenMPIsTargetDevice && fd->isDefined() && !dontDefer &&
        !isForDefinition)
      errorNYI(fd->getSourceRange(),
               "getOrCreateCIRFunction: OpenMP target function");

    // Any attempts to use a MultiVersion function should result in retrieving
    // the iFunc instead. Name mangling will handle the rest of the changes.
    if (fd->isMultiVersion())
      errorNYI(fd->getSourceRange(), "getOrCreateCIRFunction: multi-version");
  }

  // Lookup the entry, lazily creating it if necessary.
  mlir::Operation *entry = getGlobalValue(mangledName);
  if (entry) {
    if (!isa<cir::FuncOp>(entry))
      errorNYI(d->getSourceRange(), "getOrCreateCIRFunction: non-FuncOp");

    assert(!cir::MissingFeatures::weakRefReference());

    // Handle dropped DLL attributes.
    if (d && !d->hasAttr<DLLImportAttr>() && !d->hasAttr<DLLExportAttr>()) {
      assert(!cir::MissingFeatures::setDLLStorageClass());
      setDSOLocal(entry);
    }

    // If there are two attempts to define the same mangled name, issue an
    // error.
    auto fn = cast<cir::FuncOp>(entry);
    if (isForDefinition && fn && !fn.isDeclaration()) {
      errorNYI(d->getSourceRange(), "Duplicate function definition");
    }
    // Reuse the existing declaration when the types match.
    if (fn && fn.getFunctionType() == funcType) {
      return fn;
    }

    // For a mere reference, a mismatched type is tolerated; callers bitcast
    // as needed.
    if (!isForDefinition) {
      return fn;
    }

    // TODO(cir): classic codegen checks here if this is a llvm::GlobalAlias.
    // How will we support this?
  }

  // No usable existing global: create a fresh declaration. Fall back to the
  // module's location when the decl has no valid source range.
  auto *funcDecl = llvm::cast_or_null<FunctionDecl>(gd.getDecl());
  bool invalidLoc = !funcDecl ||
                    funcDecl->getSourceRange().getBegin().isInvalid() ||
                    funcDecl->getSourceRange().getEnd().isInvalid();
  cir::FuncOp funcOp = createCIRFunction(
      invalidLoc ? theModule->getLoc() : getLoc(funcDecl->getSourceRange()),
      mangledName, mlir::cast<cir::FuncType>(funcType), funcDecl);

  if (d)
    setFunctionAttributes(gd, funcOp, /*isIncompleteFunction=*/false, isThunk);

  // 'dontDefer' actually means don't move this to the deferredDeclsToEmit list.
  if (dontDefer) {
    // TODO(cir): This assertion will need an additional condition when we
    // support incomplete functions.
    assert(funcOp.getFunctionType() == funcType);
    return funcOp;
  }

  // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
  // each other bottoming out with the base dtor. Therefore we emit non-base
  // dtors on usage, even if there is no dtor definition in the TU.
  if (isa_and_nonnull<CXXDestructorDecl>(d) &&
      getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(d),
                                         gd.getDtorType()))
    errorNYI(d->getSourceRange(), "getOrCreateCIRFunction: dtor");

  // This is the first use or definition of a mangled name. If there is a
  // deferred decl with this name, remember that we need to emit it at the end
  // of the file.
  auto ddi = deferredDecls.find(mangledName);
  if (ddi != deferredDecls.end()) {
    // Move the potentially referenced deferred decl to the
    // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
    // don't need it anymore).
    addDeferredDeclToEmit(ddi->second);
    deferredDecls.erase(ddi);

    // Otherwise, there are cases we have to worry about where we're using a
    // declaration for which we must emit a definition but where we might not
    // find a top-level definition.
    // - member functions defined inline in their classes
    // - friend functions defined inline in some class
    // - special member functions with implicit definitions
    // If we ever change our AST traversal to walk into class methods, this
    // will be unnecessary.
    //
    // We also don't emit a definition for a function if it's going to be an
    // entry in a vtable, unless it's already marked as used.
  } else if (getLangOpts().CPlusPlus && d) {
    // Look for a declaration that's lexically in a record.
    for (const auto *fd = cast<FunctionDecl>(d)->getMostRecentDecl(); fd;
         fd = fd->getPreviousDecl()) {
      if (isa<CXXRecordDecl>(fd->getLexicalDeclContext())) {
        if (fd->doesThisDeclarationHaveABody()) {
          addDeferredDeclToEmit(gd.getWithDecl(fd));
          break;
        }
      }
    }
  }

  return funcOp;
}
1788
/// Create a new cir::FuncOp named \p name with type \p funcType at \p loc.
///
/// The function is created as a declaration (empty body) with external
/// linkage and private MLIR symbol visibility. When this is triggered while
/// another function is being emitted (nested global emission), the new
/// function is inserted before the current one rather than appended to the
/// module.
cir::FuncOp
CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
                                cir::FuncType funcType,
                                const clang::FunctionDecl *funcDecl) {
  cir::FuncOp func;
  {
    // Restore the builder's insertion point when this scope exits.
    mlir::OpBuilder::InsertionGuard guard(builder);

    // Some global emissions are triggered while emitting a function, e.g.
    // void s() { x.method() }
    //
    // Be sure to insert a new function before a current one.
    CIRGenFunction *cgf = this->curCGF;
    if (cgf)
      builder.setInsertionPoint(cgf->curFn);

    func = builder.create<cir::FuncOp>(loc, name, funcType);

    assert(!cir::MissingFeatures::opFuncAstDeclAttr());
    assert(!cir::MissingFeatures::opFuncNoProto());

    assert(func.isDeclaration() && "expected empty body");

    // A declaration gets private visibility by default, but external linkage
    // as the default linkage.
    func.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
        &getMLIRContext(), cir::GlobalLinkageKind::ExternalLinkage));
    mlir::SymbolTable::setSymbolVisibility(
        func, mlir::SymbolTable::Visibility::Private);

    assert(!cir::MissingFeatures::opFuncExtraAttrs());

    // Only append to the module when no in-function insertion point was used
    // above.
    if (!cgf)
      theModule.push_back(func);
  }
  return func;
}
1826
/// Map a CIR linkage kind to the MLIR symbol visibility CIRGen uses for it:
/// private-style linkages become private symbols; every other linkage handled
/// so far becomes public.
mlir::SymbolTable::Visibility
CIRGenModule::getMLIRVisibilityFromCIRLinkage(cir::GlobalLinkageKind glk) {
  switch (glk) {
  case cir::GlobalLinkageKind::InternalLinkage:
  case cir::GlobalLinkageKind::PrivateLinkage:
    return mlir::SymbolTable::Visibility::Private;
  case cir::GlobalLinkageKind::ExternalLinkage:
  case cir::GlobalLinkageKind::ExternalWeakLinkage:
  case cir::GlobalLinkageKind::LinkOnceODRLinkage:
  case cir::GlobalLinkageKind::AvailableExternallyLinkage:
  case cir::GlobalLinkageKind::CommonLinkage:
  case cir::GlobalLinkageKind::WeakAnyLinkage:
  case cir::GlobalLinkageKind::WeakODRLinkage:
    return mlir::SymbolTable::Visibility::Public;
  default: {
    // Remaining linkage kinds are not implemented yet: report loudly and
    // abort in asserts builds (release builds reach llvm_unreachable below).
    llvm::errs() << "visibility not implemented for '"
                 << stringifyGlobalLinkageKind(glk) << "'\n";
    assert(0 && "not implemented");
  }
  }
  llvm_unreachable("linkage should be handled above!");
}
1849
getGlobalVisibilityKindFromClangVisibility(clang::VisibilityAttr::VisibilityType visibility)1850 cir::VisibilityKind CIRGenModule::getGlobalVisibilityKindFromClangVisibility(
1851 clang::VisibilityAttr::VisibilityType visibility) {
1852 switch (visibility) {
1853 case clang::VisibilityAttr::VisibilityType::Default:
1854 return cir::VisibilityKind::Default;
1855 case clang::VisibilityAttr::VisibilityType::Hidden:
1856 return cir::VisibilityKind::Hidden;
1857 case clang::VisibilityAttr::VisibilityType::Protected:
1858 return cir::VisibilityKind::Protected;
1859 }
1860 llvm_unreachable("unexpected visibility value");
1861 }
1862
1863 cir::VisibilityAttr
getGlobalVisibilityAttrFromDecl(const Decl * decl)1864 CIRGenModule::getGlobalVisibilityAttrFromDecl(const Decl *decl) {
1865 const clang::VisibilityAttr *va = decl->getAttr<clang::VisibilityAttr>();
1866 cir::VisibilityAttr cirVisibility =
1867 cir::VisibilityAttr::get(&getMLIRContext());
1868 if (va) {
1869 cirVisibility = cir::VisibilityAttr::get(
1870 &getMLIRContext(),
1871 getGlobalVisibilityKindFromClangVisibility(va->getVisibility()));
1872 }
1873 return cirVisibility;
1874 }
1875
/// Finalize CIR generation for the module: emit all deferred declarations,
/// then apply pending replacements. Most of classic CodeGen's Release() work
/// is still unimplemented (tracked by cgmRelease in MissingFeatures).
void CIRGenModule::release() {
  emitDeferred();
  applyReplacements();

  // There's a lot of code that is not implemented yet.
  assert(!cir::MissingFeatures::cgmRelease());
}
1883
/// Emit \p mangledName as a function alias for \p aliasee with the given
/// \p linkage.
///
/// In CIR the alias is a cir::FuncOp carrying an aliasee attribute rather
/// than a separate GlobalAlias entity; unlike LLVM IR, the alias keeps its
/// own structor-specific function type, which may differ from the aliasee's.
/// \p op, if non-null, is a pre-existing global with this name whose uses
/// would need to be redirected to the alias (not implemented yet).
void CIRGenModule::emitAliasForGlobal(StringRef mangledName,
                                      mlir::Operation *op, GlobalDecl aliasGD,
                                      cir::FuncOp aliasee,
                                      cir::GlobalLinkageKind linkage) {

  auto *aliasFD = dyn_cast<FunctionDecl>(aliasGD.getDecl());
  assert(aliasFD && "expected FunctionDecl");

  // The aliasee function type is different from the alias one, this difference
  // is specific to CIR because in LLVM the ptr types are already erased at this
  // point.
  const CIRGenFunctionInfo &fnInfo =
      getTypes().arrangeCXXStructorDeclaration(aliasGD);
  cir::FuncType fnType = getTypes().getFunctionType(fnInfo);

  cir::FuncOp alias =
      createCIRFunction(getLoc(aliasGD.getDecl()->getSourceRange()),
                        mangledName, fnType, aliasFD);
  alias.setAliasee(aliasee.getName());
  alias.setLinkage(linkage);
  // Declarations cannot have public MLIR visibility, just mark them private
  // but this really should have no meaning since CIR should not be using
  // this information to derive linkage information.
  mlir::SymbolTable::setSymbolVisibility(
      alias, mlir::SymbolTable::Visibility::Private);

  // Alias constructors and destructors are always unnamed_addr.
  assert(!cir::MissingFeatures::opGlobalUnnamedAddr());

  // Switch any previous uses to the alias.
  if (op) {
    errorNYI(aliasFD->getSourceRange(), "emitAliasForGlobal: previous uses");
  } else {
    // Name already set by createCIRFunction
  }

  // Finally, set up the alias with its proper name and attributes.
  setCommonAttributes(aliasGD, alias);
}
1923
/// Convert the clang AST type \p type to its CIR (MLIR) equivalent,
/// delegating to the CIRGenTypes converter/cache.
mlir::Type CIRGenModule::convertType(QualType type) {
  return genTypes.convertType(type);
}
1927
verifyModule() const1928 bool CIRGenModule::verifyModule() const {
1929 // Verify the module after we have finished constructing it, this will
1930 // check the structural properties of the IR and invoke any specific
1931 // verifiers we have on the CIR operations.
1932 return mlir::verify(theModule).succeeded();
1933 }
1934
1935 // TODO(cir): this can be shared with LLVM codegen.
computeNonVirtualBaseClassOffset(const CXXRecordDecl * derivedClass,llvm::iterator_range<CastExpr::path_const_iterator> path)1936 CharUnits CIRGenModule::computeNonVirtualBaseClassOffset(
1937 const CXXRecordDecl *derivedClass,
1938 llvm::iterator_range<CastExpr::path_const_iterator> path) {
1939 CharUnits offset = CharUnits::Zero();
1940
1941 const ASTContext &astContext = getASTContext();
1942 const CXXRecordDecl *rd = derivedClass;
1943
1944 for (const CXXBaseSpecifier *base : path) {
1945 assert(!base->isVirtual() && "Should not see virtual bases here!");
1946
1947 // Get the layout.
1948 const ASTRecordLayout &layout = astContext.getASTRecordLayout(rd);
1949
1950 const auto *baseDecl = cast<CXXRecordDecl>(
1951 base->getType()->castAs<clang::RecordType>()->getDecl());
1952
1953 // Add the offset.
1954 offset += layout.getBaseClassOffset(baseDecl);
1955
1956 rd = baseDecl;
1957 }
1958
1959 return offset;
1960 }
1961
errorNYI(SourceLocation loc,llvm::StringRef feature)1962 DiagnosticBuilder CIRGenModule::errorNYI(SourceLocation loc,
1963 llvm::StringRef feature) {
1964 unsigned diagID = diags.getCustomDiagID(
1965 DiagnosticsEngine::Error, "ClangIR code gen Not Yet Implemented: %0");
1966 return diags.Report(loc, diagID) << feature;
1967 }
1968
/// Emit a "Not Yet Implemented" error over the source range \p loc.
/// Delegates to the SourceLocation overload, then streams the range into the
/// diagnostic so it gets highlighted.
DiagnosticBuilder CIRGenModule::errorNYI(SourceRange loc,
                                         llvm::StringRef feature) {
  return errorNYI(loc.getBegin(), feature) << loc;
}
1973