diff --git a/include/swift/AST/Builtins.def b/include/swift/AST/Builtins.def
index b39972c7be964..b98c135217d99 100644
--- a/include/swift/AST/Builtins.def
+++ b/include/swift/AST/Builtins.def
@@ -209,10 +209,11 @@ BUILTIN_BINARY_PREDICATE(FCMP_ULE, "fcmp_ule", "n", FloatOrVector)
 BUILTIN_BINARY_PREDICATE(FCMP_UNE, "fcmp_une", "n", FloatOrVector)
 BUILTIN_BINARY_PREDICATE(FCMP_UNO, "fcmp_uno", "n", FloatOrVector)
 
-// BUILTIN_SIL_OPERATION - Operations that can be lowered to SIL instructions.
-// These have various types.
-// Since these operations will be lowered to SIL Instructions, we do not
-// assign any attributes on them.
+// BUILTIN_SIL_OPERATION - Operations that can be lowered to SIL
+// instructions. They can never be invoked via a BuiltinInst and are instead
+// expected to just be used by SILGen to emit sequences of non-BuiltinInst
+// instructions. These have various types. Since these operations will be
+// lowered to SIL Instructions, we do not assign any attributes on them.
 #ifndef BUILTIN_SIL_OPERATION
 #define BUILTIN_SIL_OPERATION(Id, Name, Overload) BUILTIN(Id, Name, "")
 #endif
@@ -558,8 +559,9 @@ BUILTIN_RUNTIME_CALL(ErrorInMain, "errorInMain", "")
 /// nominal type is Optional.
 BUILTIN_RUNTIME_CALL(IsOptionalType, "isOptional", "")
 
-// BUILTIN_MISC_OPERATION - Miscellaneous operations without a unifying class.
-// These have various types.
+/// BUILTIN_MISC_OPERATION - Miscellaneous operations without a unifying class.
+/// These have various types. They are emitted as BuiltinInst in a standard way
+/// by SILGen.
 #ifndef BUILTIN_MISC_OPERATION
 #define BUILTIN_MISC_OPERATION(Id, Name, Attrs, Overload) \
   BUILTIN(Id, Name, Attrs)
diff --git a/lib/IRGen/GenBuiltin.cpp b/lib/IRGen/GenBuiltin.cpp
index b2d38181012da..6a32864fd2715 100644
--- a/lib/IRGen/GenBuiltin.cpp
+++ b/lib/IRGen/GenBuiltin.cpp
@@ -129,6 +129,58 @@ getMaximallyAbstractedLoweredTypeAndTypeInfo(IRGenModule &IGM, Type unloweredTyp
   return {lowered, IGM.getTypeInfo(lowered)};
 }
 
+static bool emitLLVMIRIntrinsicCall(IRGenFunction &IGF, Identifier FnID,
+                                    Explosion &args, Explosion &out) {
+  const IntrinsicInfo &IInfo = IGF.getSILModule().getIntrinsicInfo(FnID);
+  llvm::Intrinsic::ID IID = IInfo.ID;
+
+  if (IID == llvm::Intrinsic::not_intrinsic)
+    return false;
+
+  // Emit non-mergeable traps only.
+  if (IGF.Builder.isTrapIntrinsic(IID)) {
+    IGF.Builder.CreateNonMergeableTrap(IGF.IGM, StringRef());
+    return true;
+  }
+
+  // Implement the ptrauth builtins as no-ops when the Clang
+  // intrinsics are disabled.
+  if ((IID == llvm::Intrinsic::ptrauth_sign ||
+       IID == llvm::Intrinsic::ptrauth_auth ||
+       IID == llvm::Intrinsic::ptrauth_resign ||
+       IID == llvm::Intrinsic::ptrauth_strip) &&
+      !IGF.IGM.getClangASTContext().getLangOpts().PointerAuthIntrinsics) {
+    out.add(args.claimNext()); // Return the input pointer.
+    (void)args.claimNext();    // Ignore the key.
+    if (IID != llvm::Intrinsic::ptrauth_strip) {
+      (void)args.claimNext();  // Ignore the discriminator.
+    }
+    if (IID == llvm::Intrinsic::ptrauth_resign) {
+      (void)args.claimNext();  // Ignore the new key.
+      (void)args.claimNext();  // Ignore the new discriminator.
+    }
+    return true;
+  }
+
+  SmallVector<llvm::Type *, 4> ArgTys;
+  for (auto T : IInfo.Types)
+    ArgTys.push_back(IGF.IGM.getStorageTypeForLowered(T->getCanonicalType()));
+
+  auto F = llvm::Intrinsic::getOrInsertDeclaration(
+      &IGF.IGM.Module, (llvm::Intrinsic::ID)IID, ArgTys);
+  llvm::FunctionType *FT = F->getFunctionType();
+  SmallVector<llvm::Value *, 8> IRArgs;
+  for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
+    IRArgs.push_back(args.claimNext());
+  llvm::Value *TheCall =
+      IGF.Builder.CreateIntrinsicCall((llvm::Intrinsic::ID)IID, ArgTys, IRArgs);
+
+  if (!TheCall->getType()->isVoidTy())
+    extractScalarResults(IGF, TheCall->getType(), TheCall, out);
+
+  return true;
+}
+
 /// emitBuiltinCall - Emit a call to a builtin function.
 void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
                             BuiltinInst *Inst, ArrayRef<SILType> argTypes,
@@ -136,24 +188,31 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
   auto &IGM = IGF.IGM;
 
   Identifier FnId = Inst->getName();
+
+  // Before we do anything, let's see if we have an LLVM IR intrinsic call. If
+  // we do, we can return early.
+  if (emitLLVMIRIntrinsicCall(IGF, FnId, args, out))
+    return;
+
   SILType resultType = Inst->getType();
   SubstitutionMap substitutions = Inst->getSubstitutions();
 
-  if (Builtin.ID == BuiltinValueKind::COWBufferForReading) {
+  switch (Builtin.ID) {
+  case BuiltinValueKind::COWBufferForReading: {
     // Just forward the incoming argument.
     assert(args.size() == 1 && "Expecting one incoming argument");
     out = std::move(args);
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::OnFastPath) {
+  case BuiltinValueKind::OnFastPath: {
     // The onFastPath builtin has only an effect on SIL level, so we lower it
     // to a no-op.
     return;
   }
 
   // These builtins don't care about their argument:
-  if (Builtin.ID == BuiltinValueKind::Sizeof) {
+  case BuiltinValueKind::Sizeof: {
     (void)args.claimAll();
     auto valueTy = getMaximallyAbstractedLoweredTypeAndTypeInfo(IGF.IGM,
                                       substitutions.getReplacementTypes()[0]);
@@ -161,7 +220,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::Strideof) {
+  case BuiltinValueKind::Strideof: {
     (void)args.claimAll();
     auto valueTy = getMaximallyAbstractedLoweredTypeAndTypeInfo(IGF.IGM,
                                       substitutions.getReplacementTypes()[0]);
@@ -169,7 +228,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::Alignof) {
+  case BuiltinValueKind::Alignof: {
     (void)args.claimAll();
     auto valueTy = getMaximallyAbstractedLoweredTypeAndTypeInfo(IGF.IGM,
                                       substitutions.getReplacementTypes()[0]);
@@ -180,7 +239,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::IsPOD) {
+  case BuiltinValueKind::IsPOD: {
     (void)args.claimAll();
     auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                       substitutions.getReplacementTypes()[0]);
@@ -188,14 +247,14 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::IsConcrete) {
+  case BuiltinValueKind::IsConcrete: {
     (void)args.claimAll();
     auto isConcrete = !substitutions.getReplacementTypes()[0]->hasArchetype();
     out.add(llvm::ConstantInt::get(IGF.IGM.Int1Ty, isConcrete));
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::IsBitwiseTakable) {
+  case BuiltinValueKind::IsBitwiseTakable: {
     (void)args.claimAll();
     auto valueTy = getLoweredTypeAndTypeInfo(IGF.IGM,
                                       substitutions.getReplacementTypes()[0]);
@@ -203,17 +262,8 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  // addressof expects an lvalue argument.
-  if (Builtin.ID == BuiltinValueKind::AddressOf) {
-    llvm::Value *address = args.claimNext();
-    llvm::Value *value = IGF.Builder.CreateBitCast(address,
-                                                   IGF.IGM.Int8PtrTy);
-    out.add(value);
-    return;
-  }
-
-  // getCurrentAsyncTask has no arguments.
-  if (Builtin.ID == BuiltinValueKind::GetCurrentAsyncTask) {
+  case BuiltinValueKind::GetCurrentAsyncTask: {
     auto task = IGF.getAsyncTask();
     if (!task->getType()->isPointerTy()) {
       out.add(IGF.Builder.CreateIntToPtr(task, IGF.IGM.RefCountedPtrTy));
@@ -224,12 +274,12 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
   }
 
   // emitGetCurrentExecutor has no arguments.
-  if (Builtin.ID == BuiltinValueKind::GetCurrentExecutor) {
+  case BuiltinValueKind::GetCurrentExecutor: {
     emitGetCurrentExecutor(IGF, out);
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::StartAsyncLet) {
+  case BuiltinValueKind::StartAsyncLet: {
     auto taskOptions = args.claimNext();
     auto taskFunction = args.claimNext();
     auto taskContext = args.claimNext();
@@ -249,7 +299,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::StartAsyncLetWithLocalBuffer) {
+  case BuiltinValueKind::StartAsyncLetWithLocalBuffer: {
     auto taskOptions = args.claimNext();
     auto taskFunction = args.claimNext();
     auto taskContext = args.claimNext();
@@ -270,7 +320,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::EndAsyncLet) {
+  case BuiltinValueKind::EndAsyncLet: {
     emitEndAsyncLet(IGF, args.claimNext());
     // Ignore a second operand which is inserted by ClosureLifetimeFixup and
     // only used for dependency tracking.
@@ -278,7 +328,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::EndAsyncLetLifetime) {
+  case BuiltinValueKind::EndAsyncLetLifetime: {
     IGF.Builder.CreateLifetimeEnd(args.claimNext());
     // Ignore a second operand which is inserted by ClosureLifetimeFixup and
     // only used for dependency tracking.
@@ -286,7 +336,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::TaskRunInline) {
+  case BuiltinValueKind::TaskRunInline: {
     auto result = args.claimNext();
     auto closure = args.claimNext();
     auto closureContext = args.claimNext();
@@ -295,33 +345,33 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::CreateTaskGroup) {
+  case BuiltinValueKind::CreateTaskGroup: {
     llvm::Value *groupFlags = nullptr;
     assert(args.size() == 0);
     out.add(emitCreateTaskGroup(IGF, substitutions, groupFlags));
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::CreateTaskGroupWithFlags) {
+  case BuiltinValueKind::CreateTaskGroupWithFlags: {
     auto groupFlags = args.claimNext();
     assert(args.size() == 0);
     out.add(emitCreateTaskGroup(IGF, substitutions, groupFlags));
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::DestroyTaskGroup) {
+  case BuiltinValueKind::DestroyTaskGroup: {
     emitDestroyTaskGroup(IGF, args.claimNext());
     return;
   }
 
   // Everything else cares about the (rvalue) argument.
 
-  if (Builtin.ID == BuiltinValueKind::CancelAsyncTask) {
+  case BuiltinValueKind::CancelAsyncTask: {
     emitTaskCancel(IGF, args.claimNext());
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::ConvertTaskToJob) {
+  case BuiltinValueKind::ConvertTaskToJob: {
     auto task = args.claimNext();
     // The job object starts at the beginning of the task.
     auto job = IGF.Builder.CreateBitCast(task, IGF.IGM.SwiftJobPtrTy);
@@ -329,9 +379,9 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::InitializeDefaultActor ||
-      Builtin.ID == BuiltinValueKind::InitializeNonDefaultDistributedActor ||
-      Builtin.ID == BuiltinValueKind::DestroyDefaultActor) {
+  case BuiltinValueKind::InitializeDefaultActor:
+  case BuiltinValueKind::InitializeNonDefaultDistributedActor:
+  case BuiltinValueKind::DestroyDefaultActor: {
     irgen::FunctionPointer fn;
     switch (Builtin.ID) {
     case BuiltinValueKind::InitializeDefaultActor:
@@ -354,8 +404,8 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::ResumeThrowingContinuationReturning ||
-      Builtin.ID == BuiltinValueKind::ResumeNonThrowingContinuationReturning) {
+  case BuiltinValueKind::ResumeThrowingContinuationReturning:
+  case BuiltinValueKind::ResumeNonThrowingContinuationReturning: {
     auto continuation = args.claimNext();
     auto valueTy = argTypes[1];
     auto valuePtr = args.claimNext();
@@ -366,25 +416,25 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::ResumeThrowingContinuationThrowing) {
+  case BuiltinValueKind::ResumeThrowingContinuationThrowing: {
    auto continuation = args.claimNext();
    auto error = args.claimNext();
    IGF.emitResumeAsyncContinuationThrowing(continuation, error);
    return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::BuildMainActorExecutorRef) {
+  case BuiltinValueKind::BuildMainActorExecutorRef: {
     emitBuildMainActorExecutorRef(IGF, out);
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::BuildDefaultActorExecutorRef) {
+  case BuiltinValueKind::BuildDefaultActorExecutorRef: {
     auto actor = args.claimNext();
     emitBuildDefaultActorExecutorRef(IGF, actor, out);
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::BuildOrdinaryTaskExecutorRef) {
+  case BuiltinValueKind::BuildOrdinaryTaskExecutorRef: {
     auto actor = args.claimNext();
     auto type = substitutions.getReplacementTypes()[0]->getCanonicalType();
     auto conf = substitutions.getConformances()[0];
@@ -392,14 +442,14 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::BuildOrdinarySerialExecutorRef) {
+  case BuiltinValueKind::BuildOrdinarySerialExecutorRef: {
     auto actor = args.claimNext();
     auto type = substitutions.getReplacementTypes()[0]->getCanonicalType();
     auto conf = substitutions.getConformances()[0];
     emitBuildOrdinarySerialExecutorRef(IGF, actor, type, conf, out);
     return;
   }
-  if (Builtin.ID == BuiltinValueKind::BuildComplexEqualitySerialExecutorRef) {
+  case BuiltinValueKind::BuildComplexEqualitySerialExecutorRef: {
     auto actor = args.claimNext();
     auto type = substitutions.getReplacementTypes()[0]->getCanonicalType();
     auto conf = substitutions.getConformances()[0];
@@ -407,62 +457,13 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::InitializeDistributedRemoteActor) {
+  case BuiltinValueKind::InitializeDistributedRemoteActor: {
     auto actorMetatype = args.claimNext();
     emitDistributedActorInitializeRemote(IGF, resultType, actorMetatype, out);
     return;
   }
 
-  // If this is an LLVM IR intrinsic, lower it to an intrinsic call.
-  const IntrinsicInfo &IInfo = IGF.getSILModule().getIntrinsicInfo(FnId);
-  llvm::Intrinsic::ID IID = IInfo.ID;
-
-  // Emit non-mergeable traps only.
-  if (IGF.Builder.isTrapIntrinsic(IID)) {
-    IGF.Builder.CreateNonMergeableTrap(IGF.IGM, StringRef());
-    return;
-  }
-
-  // Implement the ptrauth builtins as no-ops when the Clang
-  // intrinsics are disabled.
-  if ((IID == llvm::Intrinsic::ptrauth_sign ||
-       IID == llvm::Intrinsic::ptrauth_auth ||
-       IID == llvm::Intrinsic::ptrauth_resign ||
-       IID == llvm::Intrinsic::ptrauth_strip) &&
-      !IGF.IGM.getClangASTContext().getLangOpts().PointerAuthIntrinsics) {
-    out.add(args.claimNext()); // Return the input pointer.
-    (void) args.claimNext();   // Ignore the key.
-    if (IID != llvm::Intrinsic::ptrauth_strip) {
-      (void) args.claimNext(); // Ignore the discriminator.
-    }
-    if (IID == llvm::Intrinsic::ptrauth_resign) {
-      (void) args.claimNext(); // Ignore the new key.
-      (void) args.claimNext(); // Ignore the new discriminator.
-    }
-    return;
-  }
-
-  if (IID != llvm::Intrinsic::not_intrinsic) {
-    SmallVector<llvm::Type *, 4> ArgTys;
-    for (auto T : IInfo.Types)
-      ArgTys.push_back(IGF.IGM.getStorageTypeForLowered(T->getCanonicalType()));
-
-    auto F = llvm::Intrinsic::getOrInsertDeclaration(
-        &IGF.IGM.Module, (llvm::Intrinsic::ID)IID, ArgTys);
-    llvm::FunctionType *FT = F->getFunctionType();
-    SmallVector<llvm::Value *, 8> IRArgs;
-    for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
-      IRArgs.push_back(args.claimNext());
-    llvm::Value *TheCall = IGF.Builder.CreateIntrinsicCall(
-        (llvm::Intrinsic::ID)IID, ArgTys, IRArgs);
-
-    if (!TheCall->getType()->isVoidTy())
-      extractScalarResults(IGF, TheCall->getType(), TheCall, out);
-
-    return;
-  }
-
-  if (Builtin.ID == BuiltinValueKind::StringObjectOr) {
+  case BuiltinValueKind::StringObjectOr: {
     llvm::Value *lhs = args.claimNext();
     llvm::Value *rhs = args.claimNext();
     llvm::Value *v = IGF.Builder.CreateOr(lhs, rhs);
@@ -471,28 +472,28 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
 // TODO: A linear series of ifs is suboptimal.
 #define BUILTIN_SIL_OPERATION(id, name, overload) \
-  if (Builtin.ID == BuiltinValueKind::id) \
+  case BuiltinValueKind::id: \
     llvm_unreachable(name " builtin should be lowered away by SILGen!");
 
 #define BUILTIN_CAST_OPERATION(id, name, attrs) \
-  if (Builtin.ID == BuiltinValueKind::id) \
+  case BuiltinValueKind::id: \
     return emitCastBuiltin(IGF, resultType, out, args, \
                            llvm::Instruction::id);
 
 #define BUILTIN_CAST_OR_BITCAST_OPERATION(id, name, attrs) \
-  if (Builtin.ID == BuiltinValueKind::id) \
+  case BuiltinValueKind::id: \
     return emitCastOrBitCastBuiltin(IGF, resultType, out, args, \
                                     BuiltinValueKind::id);
 
 #define BUILTIN_BINARY_OPERATION_OVERLOADED_STATIC(id, name, attrs, overload) \
-  if (Builtin.ID == BuiltinValueKind::id) { \
+  case BuiltinValueKind::id: { \
     llvm::Value *lhs = args.claimNext(); \
     llvm::Value *rhs = args.claimNext(); \
     llvm::Value *v = IGF.Builder.Create##id(lhs, rhs); \
     return out.add(v); \
   }
 
 #define BUILTIN_BINARY_OPERATION_POLYMORPHIC(id, name) \
-  if (Builtin.ID == BuiltinValueKind::id) { \
+  case BuiltinValueKind::id: { \
     /* This builtin must be guarded so that dynamically it is never called. */ \
     IGF.emitTrap("invalid use of polymorphic builtin", /*Unreachable*/ false); \
     auto returnValue = llvm::UndefValue::get(IGF.IGM.Int8PtrTy); \
@@ -502,7 +503,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
   }
 
 #define BUILTIN_RUNTIME_CALL(id, name, attrs) \
-  if (Builtin.ID == BuiltinValueKind::id) { \
+  case BuiltinValueKind::id: { \
     auto fn = IGF.IGM.get##id##FunctionPointer(); \
     llvm::CallInst *call = IGF.Builder.CreateCall(fn, args.claimNext()); \
     return out.add(call); \
@@ -510,7 +511,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
 
 #define BUILTIN_BINARY_OPERATION_WITH_OVERFLOW(id, name, uncheckedID, attrs, \
                                                overload) \
-  if (Builtin.ID == BuiltinValueKind::id) { \
+  case BuiltinValueKind::id: { \
     SmallVector<llvm::Type *, 2> ArgTys; \
     auto opType = Builtin.Types[0]->getCanonicalType(); \
     ArgTys.push_back(IGF.IGM.getStorageTypeForLowered(opType)); \
@@ -530,17 +531,17 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
 // third argument is true. Now, we just ignore it.
 
 #define BUILTIN_BINARY_PREDICATE(id, name, attrs, overload) \
-  if (Builtin.ID == BuiltinValueKind::id) \
+  case BuiltinValueKind::id: \
     return emitCompareBuiltin(IGF, out, args, llvm::CmpInst::id);
 
 #define BUILTIN_TYPE_TRAIT_OPERATION(id, name) \
-  if (Builtin.ID == BuiltinValueKind::id) \
+  case BuiltinValueKind::id: \
     return emitTypeTraitBuiltin(IGF, out, args, substitutions, &TypeBase::name);
 
 #define BUILTIN(ID, Name, Attrs) // Ignore the rest.
 #include "swift/AST/Builtins.def"
 
-  if (Builtin.ID == BuiltinValueKind::GlobalStringTablePointer) {
+  case BuiltinValueKind::GlobalStringTablePointer: {
     // This builtin should be used only on strings constructed from a
     // string literal. If we ever get to the point of executing this builtin
     // at run time, it implies an incorrect use of the builtin and must result
@@ -553,7 +554,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return out.add(returnValue);
   }
 
-  if (Builtin.ID == BuiltinValueKind::WillThrow) {
+  case BuiltinValueKind::WillThrow: {
     // willThrow is emitted like a Swift function call with the error in
     // the error return register. We also have to pass a fake context
     // argument due to how swiftcc works in clang.
@@ -589,18 +590,18 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return out.add(call);
   }
 
-  if (Builtin.ID == BuiltinValueKind::FNeg) {
+  case BuiltinValueKind::FNeg: {
     llvm::Value *v = IGF.Builder.CreateFNeg(args.claimNext());
     return out.add(v);
   }
-  if (Builtin.ID == BuiltinValueKind::AssumeTrue) {
+  case BuiltinValueKind::AssumeTrue: {
     llvm::Value *v = args.claimNext();
     if (v->getType() == IGF.IGM.Int1Ty) {
       IGF.Builder.CreateIntrinsicCall(llvm::Intrinsic::assume, v);
     }
     return;
   }
-  if (Builtin.ID == BuiltinValueKind::AssumeNonNegative) {
+  case BuiltinValueKind::AssumeNonNegative: {
     llvm::Value *v = args.claimNext();
     // If the argument is a `load` or `call` we can use a range metadata to
     // specify the >= 0 constraint.
@@ -636,11 +637,11 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     // Don't generate any code for the builtin.
     return out.add(v);
   }
-  if (Builtin.ID == BuiltinValueKind::Freeze) {
+  case BuiltinValueKind::Freeze: {
     return out.add(IGF.Builder.CreateFreeze(args.claimNext()));
   }
 
-  if (Builtin.ID == BuiltinValueKind::AllocRaw) {
+  case BuiltinValueKind::AllocRaw: {
     auto size = args.claimNext();
     auto align = args.claimNext();
     // Translate the alignment to a mask.
@@ -650,7 +651,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::DeallocRaw) {
+  case BuiltinValueKind::DeallocRaw: {
     auto pointer = args.claimNext();
     auto size = args.claimNext();
     auto align = args.claimNext();
@@ -660,7 +661,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::Fence) {
+  case BuiltinValueKind::Fence: {
     SmallVector<Type, 4> Types;
     StringRef BuiltinName =
       getBuiltinBaseName(IGF.IGM.Context, FnId.str(), Types);
@@ -683,7 +684,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::Ifdef) {
+  case BuiltinValueKind::Ifdef: {
     // Ifdef not constant folded, which means it was not @_alwaysEmitIntoClient
     IGF.IGM.error(
         Inst->getLoc().getSourceLoc(),
@@ -692,7 +693,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::CmpXChg) {
+  case BuiltinValueKind::CmpXChg: {
     SmallVector<Type, 4> Types;
     StringRef BuiltinName =
      getBuiltinBaseName(IGF.IGM.Context, FnId.str(), Types);
@@ -755,7 +756,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AtomicRMW) {
+  case BuiltinValueKind::AtomicRMW: {
     using namespace llvm;
 
     SmallVector<Type, 4> Types;
@@ -816,8 +817,8 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AtomicLoad
-      || Builtin.ID == BuiltinValueKind::AtomicStore) {
+  case BuiltinValueKind::AtomicLoad:
+  case BuiltinValueKind::AtomicStore: {
     using namespace llvm;
 
     SmallVector<Type, 4> Types;
@@ -882,7 +883,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     }
   }
 
-  if (Builtin.ID == BuiltinValueKind::ExtractElement) {
+  case BuiltinValueKind::ExtractElement: {
     using namespace llvm;
 
     auto vector = args.claimNext();
@@ -891,7 +892,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::InsertElement) {
+  case BuiltinValueKind::InsertElement: {
     using namespace llvm;
 
     auto vector = args.claimNext();
@@ -901,7 +902,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::Select) {
+  case BuiltinValueKind::Select: {
     using namespace llvm;
 
     auto pred = args.claimNext();
@@ -911,7 +912,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::ShuffleVector) {
+  case BuiltinValueKind::ShuffleVector: {
     using namespace llvm;
 
     auto dict0 = args.claimNext();
@@ -921,7 +922,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::Interleave) {
+  case BuiltinValueKind::Interleave: {
     using namespace llvm;
 
     auto src0 = args.claimNext();
@@ -943,7 +944,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
   }
 
 
-  if (Builtin.ID == BuiltinValueKind::Deinterleave) {
+  case BuiltinValueKind::Deinterleave: {
     using namespace llvm;
 
     auto src0 = args.claimNext();
@@ -962,9 +963,9 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
-      Builtin.ID == BuiltinValueKind::UToUCheckedTrunc ||
-      Builtin.ID == BuiltinValueKind::SToUCheckedTrunc) {
+  case BuiltinValueKind::SToSCheckedTrunc:
+  case BuiltinValueKind::UToUCheckedTrunc:
+  case BuiltinValueKind::SToUCheckedTrunc: {
     bool Signed = (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc);
 
     auto FromType = Builtin.Types[0]->getCanonicalType();
@@ -1005,7 +1006,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return out.add(OverflowFlag);
   }
 
-  if (Builtin.ID == BuiltinValueKind::UToSCheckedTrunc) {
+  case BuiltinValueKind::UToSCheckedTrunc: {
     auto FromTy =
       IGF.IGM.getStorageTypeForLowered(Builtin.Types[0]->getCanonicalType());
     auto ToTy =
@@ -1034,7 +1035,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
 
   // We are currently emitting code for '_convertFromBuiltinIntegerLiteral',
   // which will call the builtin and pass it a non-compile-time-const parameter.
-  if (Builtin.ID == BuiltinValueKind::IntToFPWithOverflow) {
+  case BuiltinValueKind::IntToFPWithOverflow: {
     assert(Builtin.Types[0]->is<BuiltinIntegerLiteralType>());
     auto toType =
       IGF.IGM.getStorageTypeForLowered(Builtin.Types[1]->getCanonicalType());
@@ -1043,26 +1044,26 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::BitWidth) {
+  case BuiltinValueKind::BitWidth: {
     assert(Builtin.Types[0]->is<BuiltinIntegerLiteralType>());
     out.add(emitIntLiteralBitWidth(IGF, args));
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::IsNegative) {
+  case BuiltinValueKind::IsNegative: {
     assert(Builtin.Types[0]->is<BuiltinIntegerLiteralType>());
     out.add(emitIntLiteralIsNegative(IGF, args));
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::WordAtIndex) {
+  case BuiltinValueKind::WordAtIndex: {
     assert(Builtin.Types[0]->is<BuiltinIntegerLiteralType>());
     out.add(emitIntLiteralWordAtIndex(IGF, args));
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::Once
-      || Builtin.ID == BuiltinValueKind::OnceWithContext) {
+  case BuiltinValueKind::Once:
+  case BuiltinValueKind::OnceWithContext: {
     // The input type is statically (Builtin.RawPointer, @convention(thin) () -> ()).
     llvm::Value *PredPtr = args.claimNext();
     // Cast the predicate to a pointer.
@@ -1123,7 +1124,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AssertConf) {
+  case BuiltinValueKind::AssertConf: {
     // Replace the call to assert_configuration by the Debug configuration
     // value.
     // TODO: assert(IGF.IGM.getOptions().AssertConfig ==
@@ -1135,7 +1136,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::DestroyArray) {
+  case BuiltinValueKind::DestroyArray: {
     // The input type is (T.Type, Builtin.RawPointer, Builtin.Word).
     /* metatype (which may be thin) */
     if (args.size() == 3)
@@ -1208,14 +1209,14 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::CopyArray ||
-      Builtin.ID == BuiltinValueKind::TakeArrayNoAlias ||
-      Builtin.ID == BuiltinValueKind::TakeArrayFrontToBack ||
-      Builtin.ID == BuiltinValueKind::TakeArrayBackToFront ||
-      Builtin.ID == BuiltinValueKind::AssignCopyArrayNoAlias ||
-      Builtin.ID == BuiltinValueKind::AssignCopyArrayFrontToBack ||
-      Builtin.ID == BuiltinValueKind::AssignCopyArrayBackToFront ||
-      Builtin.ID == BuiltinValueKind::AssignTakeArray) {
+  case BuiltinValueKind::CopyArray:
+  case BuiltinValueKind::TakeArrayNoAlias:
+  case BuiltinValueKind::TakeArrayFrontToBack:
+  case BuiltinValueKind::TakeArrayBackToFront:
+  case BuiltinValueKind::AssignCopyArrayNoAlias:
+  case BuiltinValueKind::AssignCopyArrayFrontToBack:
+  case BuiltinValueKind::AssignCopyArrayBackToFront:
+  case BuiltinValueKind::AssignTakeArray: {
     // The input type is (T.Type, Builtin.RawPointer, Builtin.RawPointer, Builtin.Word).
     /* metatype (which may be thin) */
     if (args.size() == 4)
@@ -1362,13 +1363,13 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::CondUnreachable) {
+  case BuiltinValueKind::CondUnreachable: {
     // conditionallyUnreachable is a no-op by itself. Since it's noreturn, there
     // should be a true unreachable terminator right after.
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::ZeroInitializer) {
+  case BuiltinValueKind::ZeroInitializer: {
     if (args.size() > 0) {
       auto valueType = argTypes[0];
       auto &valueTI = IGF.IGM.getTypeInfo(valueType);
@@ -1388,13 +1389,13 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::PrepareInitialization) {
+  case BuiltinValueKind::PrepareInitialization: {
     ASSERT(args.size() > 0 && "only address-variant of prepareInitialization is supported");
     (void)args.claimNext();
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::GetObjCTypeEncoding) {
+  case BuiltinValueKind::GetObjCTypeEncoding: {
     (void)args.claimAll();
     Type valueTy = substitutions.getReplacementTypes()[0];
     // Get the type encoding for the associated clang type.
@@ -1408,7 +1409,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::TSanInoutAccess) {
+  case BuiltinValueKind::TSanInoutAccess: {
     auto address = args.claimNext();
 
     // The tsanInoutAccess builtin takes a single argument, the address
@@ -1425,7 +1426,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::TargetOSVersionAtLeast) {
+  case BuiltinValueKind::TargetOSVersionAtLeast: {
     auto major = args.claimNext();
     auto minor = args.claimNext();
     auto patch = args.claimNext();
@@ -1434,7 +1435,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::TargetVariantOSVersionAtLeast) {
+  case BuiltinValueKind::TargetVariantOSVersionAtLeast: {
     auto major = args.claimNext();
     auto minor = args.claimNext();
     auto patch = args.claimNext();
@@ -1444,8 +1445,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID ==
-      BuiltinValueKind::TargetOSVersionOrVariantOSVersionAtLeast) {
+  case BuiltinValueKind::TargetOSVersionOrVariantOSVersionAtLeast: {
     auto major1 = args.claimNext();
     auto minor1 = args.claimNext();
     auto patch1 = args.claimNext();
@@ -1461,7 +1461,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::TypePtrAuthDiscriminator) {
+  case BuiltinValueKind::TypePtrAuthDiscriminator: {
     (void)args.claimAll();
     Type valueTy = substitutions.getReplacementTypes()[0];
 
@@ -1472,7 +1472,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::IsSameMetatype) {
+  case BuiltinValueKind::IsSameMetatype: {
     auto metatypeLHS = args.claimNext();
     auto metatypeRHS = args.claimNext();
     (void)args.claimAll();
@@ -1485,7 +1485,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AutoDiffCreateLinearMapContextWithType) {
+  case BuiltinValueKind::AutoDiffCreateLinearMapContextWithType: {
     auto topLevelSubcontextMetaType = args.claimNext();
     out.add(emitAutoDiffCreateLinearMapContextWithType(
                 IGF, topLevelSubcontextMetaType)
@@ -1493,7 +1493,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AutoDiffProjectTopLevelSubcontext) {
+  case BuiltinValueKind::AutoDiffProjectTopLevelSubcontext: {
     Address allocatorAddr(args.claimNext(), IGF.IGM.RefCountedStructTy,
                           IGF.IGM.getPointerAlignment());
     out.add(
@@ -1501,7 +1501,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AutoDiffAllocateSubcontextWithType) {
+  case BuiltinValueKind::AutoDiffAllocateSubcontextWithType: {
     Address allocatorAddr(args.claimNext(), IGF.IGM.RefCountedStructTy,
                           IGF.IGM.getPointerAlignment());
     auto subcontextMetatype = args.claimNext();
@@ -1511,7 +1511,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AssumeAlignment) {
+  case BuiltinValueKind::AssumeAlignment: {
    // A no-op pointer cast that passes on its first value. Common occurrences of
    // this builtin should already be removed with the alignment guarantee moved
    // to the subsequent load or store.
@@ -1524,7 +1524,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::AllocVector) {
+  case BuiltinValueKind::AllocVector: {
     // Obsolete: only there to be able to read old Swift.interface files which still
     // contain the builtin.
     (void)args.claimAll();
@@ -1535,7 +1535,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::GetEnumTag) {
+  case BuiltinValueKind::GetEnumTag: {
     auto arg = args.claimNext();
     auto ty = argTypes[0];
     auto &ti = IGF.getTypeInfo(ty);
@@ -1557,7 +1557,7 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
     return;
   }
 
-  if (Builtin.ID == BuiltinValueKind::InjectEnumTag) {
+  case BuiltinValueKind::InjectEnumTag: {
     auto input = args.claimNext();
     auto tag = args.claimNext();
     auto inputTy = argTypes[0];
@@ -1579,12 +1579,33 @@ void irgen::emitBuiltinCall(IRGenFunction &IGF, const BuiltinInfo &Builtin,
   // LLVM must not see the address generated here as 'invariant' or immutable
   // ever. A raw layout's address defies all formal access, so immutable looking
   // uses may actually mutate the underlying value!
-  if (Builtin.ID == BuiltinValueKind::AddressOfRawLayout) {
+  case BuiltinValueKind::AddressOfRawLayout: {
     auto addr = args.claimNext();
     auto value = IGF.Builder.CreateBitCast(addr, IGF.IGM.Int8PtrTy);
     out.add(value);
     return;
   }
 
-  llvm_unreachable("IRGen unimplemented for this builtin!");
+  // Builtins without IRGen implementations.
+  case BuiltinValueKind::None:
+  case BuiltinValueKind::CondFailMessage:
+  case BuiltinValueKind::StackAlloc:
+  case BuiltinValueKind::UnprotectedStackAlloc:
+  case BuiltinValueKind::StackDealloc:
+  case BuiltinValueKind::StaticReport:
+  case BuiltinValueKind::Unreachable:
+  case BuiltinValueKind::PoundAssert:
+  case BuiltinValueKind::FlowSensitiveSelfIsolation:
+  case BuiltinValueKind::FlowSensitiveDistributedSelfIsolation:
+  case BuiltinValueKind::AddressOfBorrowOpaque:
+  case BuiltinValueKind::UnprotectedAddressOfBorrowOpaque:
+  case BuiltinValueKind::CreateAsyncTask:
+  case BuiltinValueKind::ExtractFunctionIsolation:
+  case BuiltinValueKind::DistributedActorAsAnyActor:
+  case BuiltinValueKind::TypeJoin:
+  case BuiltinValueKind::TypeJoinInout:
+  case BuiltinValueKind::TypeJoinMeta:
+  case BuiltinValueKind::TriggerFallbackDiagnostic:
+    llvm_unreachable("IRGen unimplemented for this builtin!");
+  }
 }
diff --git a/lib/SIL/Verifier/SILVerifier.cpp b/lib/SIL/Verifier/SILVerifier.cpp
index a76a817ef5d5d..b8bc65a3d7316 100644
--- a/lib/SIL/Verifier/SILVerifier.cpp
+++ b/lib/SIL/Verifier/SILVerifier.cpp
@@ -2539,8 +2539,25 @@ class SILVerifier : public SILVerifierBase {
     if (builtinKind == BuiltinValueKind::ExtractFunctionIsolation) {
       require(false, "this builtin is pre-SIL-only");
     }
+
+    // Validate that "SIL builtins" never show up as a BuiltinInst: they should
+    // only result in sequences of non-BuiltinInst SIL instructions being
+    // emitted by SILGen.
+    switch (*builtinKind) {
+    case BuiltinValueKind::None:
+      break;
+#define BUILTIN_SIL_OPERATION(id, name, overload) \
+  case BuiltinValueKind::id: \
+    require(false, \
+            "this builtin should never show up as a builtin inst. It should " \
+            "only result in other SIL instructions being emitted by SILGen");
+#define BUILTIN(ID, Name, Attrs) \
+  case BuiltinValueKind::ID: \
+    break;
+#include "swift/AST/Builtins.def"
+    }
   }
-  
+
   void checkFunctionRefBaseInst(FunctionRefBaseInst *FRI) {
     auto fnType = requireObjectType(SILFunctionType, FRI,
                                     "result of function_ref");
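Both the new exhaustive switch in GenBuiltin.cpp and the new check in SILVerifier.cpp lean on the same X-macro trick: each category macro from Builtins.def is re-#defined to expand into `case` labels, and because neither switch has a `default:`, adding a builtin to Builtins.def without updating the switch becomes a compile-time `-Wswitch` diagnostic instead of a silent hole. The standalone sketch below (not part of the patch) illustrates that pattern; DemoBuiltinKind, DEMO_BUILTINS_DEF, and the builtin names are made up for illustration and only stand in for the real Builtins.def machinery.

#include <cstdio>

// Stand-in for swift/AST/Builtins.def: SIL-only operations go through
// BUILTIN_SIL_OPERATION, everything else through BUILTIN_MISC_OPERATION.
#define DEMO_BUILTINS_DEF \
  BUILTIN_SIL_OPERATION(Retain, "retain") \
  BUILTIN_MISC_OPERATION(Sizeof, "sizeof") \
  BUILTIN_MISC_OPERATION(Strideof, "strideof")

// First expansion: build the enum of all builtin kinds.
enum class DemoBuiltinKind {
#define BUILTIN_SIL_OPERATION(Id, Name) Id,
#define BUILTIN_MISC_OPERATION(Id, Name) Id,
  DEMO_BUILTINS_DEF
#undef BUILTIN_SIL_OPERATION
#undef BUILTIN_MISC_OPERATION
};

// Second expansion: an exhaustive switch mirroring the SILVerifier hunk.
// SIL-only operations are rejected, everything else is accepted; with no
// `default:`, a newly added builtin that is not handled triggers -Wswitch.
bool isAllowedAsBuiltinInst(DemoBuiltinKind kind) {
  switch (kind) {
#define BUILTIN_SIL_OPERATION(Id, Name) \
  case DemoBuiltinKind::Id: \
    return false;
#define BUILTIN_MISC_OPERATION(Id, Name) \
  case DemoBuiltinKind::Id: \
    return true;
    DEMO_BUILTINS_DEF
#undef BUILTIN_SIL_OPERATION
#undef BUILTIN_MISC_OPERATION
  }
  return true; // not reached for valid kinds
}

int main() {
  std::printf("%d\n", isAllowedAsBuiltinInst(DemoBuiltinKind::Sizeof)); // 1
  std::printf("%d\n", isAllowedAsBuiltinInst(DemoBuiltinKind::Retain)); // 0
}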