From 8a7e955b0d69e8edf921fbe8a6596da619f2515d Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 18 Jan 2022 09:09:09 -0800 Subject: [PATCH 01/88] Move -enable-sil-opaque-value to SILOptions. --- include/swift/AST/SILOptions.h | 3 +++ include/swift/Basic/LangOptions.h | 5 ----- include/swift/Option/FrontendOptions.td | 6 +++--- lib/Frontend/CompilerInvocation.cpp | 2 +- lib/SIL/IR/SILType.cpp | 2 +- lib/SIL/IR/TypeLowering.cpp | 2 +- lib/SILOptimizer/Mandatory/AddressLowering.cpp | 2 +- tools/sil-opt/SILOpt.cpp | 3 +-- 8 files changed, 11 insertions(+), 14 deletions(-) diff --git a/include/swift/AST/SILOptions.h b/include/swift/AST/SILOptions.h index c9f4fade472a6..31d02389e86ee 100644 --- a/include/swift/AST/SILOptions.h +++ b/include/swift/AST/SILOptions.h @@ -140,6 +140,9 @@ class SILOptions { /// If this is disabled we do not serialize in OSSA form when optimizing. bool EnableOSSAModules = false; + /// If set to true, compile with the SIL Opaque Values enabled. + bool EnableSILOpaqueValues = false; + // The kind of function bodies to skip emitting. FunctionBodySkipping SkipFunctionBodies = FunctionBodySkipping::None; diff --git a/include/swift/Basic/LangOptions.h b/include/swift/Basic/LangOptions.h index 7f094fc7cb11f..4f5902c7be634 100644 --- a/include/swift/Basic/LangOptions.h +++ b/include/swift/Basic/LangOptions.h @@ -376,11 +376,6 @@ namespace swift { /// [TODO: Clang-type-plumbing] Turn on for feature rollout. bool UseClangFunctionTypes = false; - /// If set to true, compile with the SIL Opaque Values enabled. - /// This is for bootstrapping. It can't be in SILOptions because the - /// TypeChecker uses it to set resolve the ParameterConvention. - bool EnableSILOpaqueValues = false; - /// If set to true, the diagnosis engine can assume the emitted diagnostics /// will be used in editor. This usually leads to more aggressive fixit. 
bool DiagnosticsEditorMode = false; diff --git a/include/swift/Option/FrontendOptions.td b/include/swift/Option/FrontendOptions.td index 86d99764ccc18..515c7037d1798 100644 --- a/include/swift/Option/FrontendOptions.td +++ b/include/swift/Option/FrontendOptions.td @@ -504,9 +504,6 @@ def disable_sil_ownership_verifier : Flag<["-"], "disable-sil-ownership-verifier def suppress_static_exclusivity_swap : Flag<["-"], "suppress-static-exclusivity-swap">, HelpText<"Suppress static violations of exclusive access with swap()">; -def enable_sil_opaque_values : Flag<["-"], "enable-sil-opaque-values">, - HelpText<"Enable SIL Opaque Values">; - def enable_experimental_static_assert : Flag<["-"], "enable-experimental-static-assert">, HelpText<"Enable experimental #assert">; @@ -1015,6 +1012,9 @@ def enable_ossa_modules : Flag<["-"], "enable-ossa-modules">, HelpText<"Always serialize SIL in ossa form. If this flag is not passed in, " "when optimizing ownership will be lowered before serializing SIL">; +def enable_sil_opaque_values : Flag<["-"], "enable-sil-opaque-values">, + HelpText<"Enable SIL Opaque Values">; + def new_driver_path : Separate<["-"], "new-driver-path">, MetaVarName<"">, HelpText<"Path of the new driver to be used">; diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp index bf58496dc8a44..b2759eaf5f73b 100644 --- a/lib/Frontend/CompilerInvocation.cpp +++ b/lib/Frontend/CompilerInvocation.cpp @@ -768,7 +768,6 @@ static bool ParseLangArgs(LangOptions &Opts, ArgList &Args, Opts.EnableObjCInterop = Args.hasFlag(OPT_enable_objc_interop, OPT_disable_objc_interop, Target.isOSDarwin()); - Opts.EnableSILOpaqueValues |= Args.hasArg(OPT_enable_sil_opaque_values); Opts.VerifyAllSubstitutionMaps |= Args.hasArg(OPT_verify_all_substitution_maps); @@ -1679,6 +1678,7 @@ static bool ParseSILArgs(SILOptions &Opts, ArgList &Args, Opts.EnableARCOptimizations &= !Args.hasArg(OPT_disable_arc_opts); Opts.EnableOSSAModules |= 
Args.hasArg(OPT_enable_ossa_modules); Opts.EnableOSSAOptimizations &= !Args.hasArg(OPT_disable_ossa_opts); + Opts.EnableSILOpaqueValues |= Args.hasArg(OPT_enable_sil_opaque_values); Opts.EnableSpeculativeDevirtualization |= Args.hasArg(OPT_enable_spec_devirt); Opts.EnableActorDataRaceChecks |= Args.hasFlag( OPT_enable_actor_data_race_checks, diff --git a/lib/SIL/IR/SILType.cpp b/lib/SIL/IR/SILType.cpp index da4bb30b81984..51e574c6ac198 100644 --- a/lib/SIL/IR/SILType.cpp +++ b/lib/SIL/IR/SILType.cpp @@ -507,7 +507,7 @@ SILResultInfo::getOwnershipKind(SILFunction &F, SILModuleConventions::SILModuleConventions(SILModule &M) : M(&M), - loweredAddresses(!M.getASTContext().LangOpts.EnableSILOpaqueValues + loweredAddresses(!M.getOptions().EnableSILOpaqueValues || M.getStage() == SILStage::Lowered) {} diff --git a/lib/SIL/IR/TypeLowering.cpp b/lib/SIL/IR/TypeLowering.cpp index fc6be72968979..65bf3fde1b804 100644 --- a/lib/SIL/IR/TypeLowering.cpp +++ b/lib/SIL/IR/TypeLowering.cpp @@ -1683,7 +1683,7 @@ namespace { TypeLowering *handleAddressOnly(CanType type, RecursiveProperties properties) { - if (!TC.Context.LangOpts.EnableSILOpaqueValues) { + if (!TC.Context.SILOpts.EnableSILOpaqueValues) { auto silType = SILType::getPrimitiveAddressType(type); return new (TC) AddressOnlyTypeLowering(silType, properties, Expansion); diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index bd428535e7371..1e1a3cd7539d8 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1527,7 +1527,7 @@ void AddressLowering::runOnFunction(SILFunction *F) { /// The entry point to this function transformation. 
void AddressLowering::run() { - if (getModule()->getASTContext().LangOpts.EnableSILOpaqueValues) { + if (getModule()->getOptions().EnableSILOpaqueValues) { for (auto &F : *getModule()) runOnFunction(&F); } diff --git a/tools/sil-opt/SILOpt.cpp b/tools/sil-opt/SILOpt.cpp index 468d9458d76ad..d892d353d02e4 100644 --- a/tools/sil-opt/SILOpt.cpp +++ b/tools/sil-opt/SILOpt.cpp @@ -571,8 +571,6 @@ int main(int argc, char **argv) { EnableObjCInterop ? true : DisableObjCInterop ? false : llvm::Triple(Target).isOSDarwin(); - Invocation.getLangOptions().EnableSILOpaqueValues = EnableSILOpaqueValues; - Invocation.getLangOptions().OptimizationRemarkPassedPattern = createOptRemarkRegex(PassRemarksPassed); Invocation.getLangOptions().OptimizationRemarkMissedPattern = @@ -634,6 +632,7 @@ int main(int argc, char **argv) { SILOpts.EnableSpeculativeDevirtualization = EnableSpeculativeDevirtualization; SILOpts.IgnoreAlwaysInline = IgnoreAlwaysInline; SILOpts.EnableOSSAModules = EnableOSSAModules; + SILOpts.EnableSILOpaqueValues = EnableSILOpaqueValues; if (CopyPropagationState) { SILOpts.CopyPropagation = *CopyPropagationState; From 8457ba30ed6ec25270cdc25700e3a17a898d7527 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 18 Jan 2022 09:12:23 -0800 Subject: [PATCH 02/88] Add emitLoad/emitStore to OpaqueValue type lowering. --- lib/SIL/IR/TypeLowering.cpp | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/SIL/IR/TypeLowering.cpp b/lib/SIL/IR/TypeLowering.cpp index 65bf3fde1b804..52b0faf8f72d0 100644 --- a/lib/SIL/IR/TypeLowering.cpp +++ b/lib/SIL/IR/TypeLowering.cpp @@ -1605,10 +1605,6 @@ namespace { }; /// Lower address only types as opaque values. - /// - /// Opaque values behave like loadable leaf types in SIL. - /// - /// FIXME: When you remove an unreachable, just delete the method. 
class OpaqueValueTypeLowering : public LeafLoadableTypeLowering { public: OpaqueValueTypeLowering(SILType type, RecursiveProperties properties, @@ -1622,6 +1618,20 @@ namespace { llvm_unreachable("copy into"); } + // OpaqueValue store cannot be decoupled from a destroy because it is not + // bitwise-movable. + void emitStore(SILBuilder &B, SILLocation loc, SILValue value, + SILValue addr, StoreOwnershipQualifier qual) const override { + B.createStore(loc, value, addr, qual); + } + + // OpaqueValue load cannot be decoupled from a copy because it is not + // bitwise-movable. + SILValue emitLoad(SILBuilder &B, SILLocation loc, SILValue addr, + LoadOwnershipQualifier qual) const override { + return B.createLoad(loc, addr, qual); + } + // --- Same as LeafLoadableTypeLowering. SILValue emitLoweredCopyValue(SILBuilder &B, SILLocation loc, From 2907c61203494ddfeab049987b4bc41fd00178c7 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Mon, 14 Feb 2022 23:23:42 -0800 Subject: [PATCH 03/88] SILModule::hasLoweredAddress --- include/swift/SIL/SILModule.h | 15 ++++++++++++--- lib/SIL/IR/SILModule.cpp | 4 ++-- lib/SIL/IR/SILType.cpp | 5 +---- lib/SIL/Parser/ParseSIL.cpp | 3 +++ lib/SILGen/SILGenFunction.cpp | 13 ++++++------- lib/SILGen/SILGenFunction.h | 2 ++ lib/SILOptimizer/Mandatory/AddressLowering.cpp | 12 +++++++----- lib/SILOptimizer/PassManager/PassPipeline.cpp | 8 +++++--- lib/SILOptimizer/PassManager/Passes.cpp | 2 +- 9 files changed, 39 insertions(+), 25 deletions(-) diff --git a/include/swift/SIL/SILModule.h b/include/swift/SIL/SILModule.h index c522cb946b86e..cbc7066fd359e 100644 --- a/include/swift/SIL/SILModule.h +++ b/include/swift/SIL/SILModule.h @@ -307,6 +307,12 @@ class SILModule { /// The stage of processing this module is at. SILStage Stage; + /// True if SIL conventions force address-only to be passed by address. + /// + /// Used for bootstrapping the AddressLowering pass. 
This should eventually + /// be inferred from the SIL stage to be true only when Stage == Lowered. + bool loweredAddresses; + /// The set of deserialization notification handlers. DeserializationNotificationHandlerSet deserializationNotificationHandlers; @@ -796,6 +802,11 @@ class SILModule { Stage = s; } + /// True if SIL conventions force address-only to be passed by address. + bool useLoweredAddresses() const { return loweredAddresses; } + + void setLoweredAddresses(bool val) { loweredAddresses = val; } + llvm::IndexedInstrProfReader *getPGOReader() const { return PGOReader.get(); } void setPGOReader(std::unique_ptr IPR) { @@ -962,15 +973,13 @@ inline bool SILOptions::supportsLexicalLifetimes(const SILModule &mod) const { // entirely. return LexicalLifetimes != LexicalLifetimesOption::Off; case SILStage::Canonical: + case SILStage::Lowered: // In Canonical SIL, lexical markers are used to ensure that object // lifetimes do not get observably shortened from the end of a lexical // scope. That behavior only occurs when lexical lifetimes is (fully) // enabled. (When only diagnostic markers are enabled, the markers are // stripped as part of lowering from raw to canonical SIL.) return LexicalLifetimes == LexicalLifetimesOption::On; - case SILStage::Lowered: - // We do not support OSSA in Lowered SIL, so this is always false. 
- return false; } } diff --git a/lib/SIL/IR/SILModule.cpp b/lib/SIL/IR/SILModule.cpp index 6f63f8c85ec08..8bd442a03ddd8 100644 --- a/lib/SIL/IR/SILModule.cpp +++ b/lib/SIL/IR/SILModule.cpp @@ -91,8 +91,8 @@ class SILModule::SerializationCallback final SILModule::SILModule(llvm::PointerUnion context, Lowering::TypeConverter &TC, const SILOptions &Options) - : Stage(SILStage::Raw), indexTrieRoot(new IndexTrieNode()), - Options(Options), serialized(false), + : Stage(SILStage::Raw), loweredAddresses(!Options.EnableSILOpaqueValues), + indexTrieRoot(new IndexTrieNode()), Options(Options), serialized(false), regDeserializationNotificationHandlerForNonTransparentFuncOME(false), regDeserializationNotificationHandlerForAllFuncOME(false), prespecializedFunctionDeclsImported(false), SerializeSILAction(), diff --git a/lib/SIL/IR/SILType.cpp b/lib/SIL/IR/SILType.cpp index 51e574c6ac198..9914d564b4d79 100644 --- a/lib/SIL/IR/SILType.cpp +++ b/lib/SIL/IR/SILType.cpp @@ -506,10 +506,7 @@ SILResultInfo::getOwnershipKind(SILFunction &F, } SILModuleConventions::SILModuleConventions(SILModule &M) - : M(&M), - loweredAddresses(!M.getOptions().EnableSILOpaqueValues - || M.getStage() == SILStage::Lowered) -{} + : M(&M), loweredAddresses(M.useLoweredAddresses()) {} bool SILModuleConventions::isReturnedIndirectlyInSIL(SILType type, SILModule &M) { diff --git a/lib/SIL/Parser/ParseSIL.cpp b/lib/SIL/Parser/ParseSIL.cpp index fd0837fc7b4c6..a387df995beec 100644 --- a/lib/SIL/Parser/ParseSIL.cpp +++ b/lib/SIL/Parser/ParseSIL.cpp @@ -6533,6 +6533,9 @@ bool SILParserState::parseDeclSILStage(Parser &P) { } M.setStage(stage); + if (M.getOptions().EnableSILOpaqueValues) { + M.setLoweredAddresses(stage != SILStage::Raw); + } DidParseSILStage = true; return false; } diff --git a/lib/SILGen/SILGenFunction.cpp b/lib/SILGen/SILGenFunction.cpp index 79c075920bcc4..27dc9410142a5 100644 --- a/lib/SILGen/SILGenFunction.cpp +++ b/lib/SILGen/SILGenFunction.cpp @@ -290,13 +290,12 @@ void 
SILGenFunction::emitCaptures(SILLocation loc, // Get an address value for a SILValue if it is address only in an type // expansion context without opaque archetype substitution. auto getAddressValue = [&](SILValue entryValue) -> SILValue { - if (SGM.Types - .getTypeLowering( - valueType, - TypeExpansionContext::noOpaqueTypeArchetypesSubstitution( - expansion.getResilienceExpansion())) - .isAddressOnly() && - !entryValue->getType().isAddress()) { + if (SGM.Types.getTypeLowering( + valueType, + TypeExpansionContext::noOpaqueTypeArchetypesSubstitution( + expansion.getResilienceExpansion())) + .isAddressOnly() + && !entryValue->getType().isAddress()) { auto addr = emitTemporaryAllocation(vd, entryValue->getType()); auto val = B.emitCopyValueOperation(vd, entryValue); diff --git a/lib/SILGen/SILGenFunction.h b/lib/SILGen/SILGenFunction.h index 79703bf9f9f9d..887e2b94fbe9b 100644 --- a/lib/SILGen/SILGenFunction.h +++ b/lib/SILGen/SILGenFunction.h @@ -245,6 +245,8 @@ class LLVM_LIBRARY_VISIBILITY SILGenFunction /// The SILModuleConventions for this SIL module. SILModuleConventions silConv; + bool useLoweredAddresses() const { return silConv.useLoweredAddresses(); } + /// The DeclContext corresponding to the function currently being emitted. DeclContext * const FunctionDC; diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 1e1a3cd7539d8..de04331eb6c71 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1527,13 +1527,15 @@ void AddressLowering::runOnFunction(SILFunction *F) { /// The entry point to this function transformation. 
void AddressLowering::run() { - if (getModule()->getOptions().EnableSILOpaqueValues) { - for (auto &F : *getModule()) - runOnFunction(&F); + if (getModule()->useLoweredAddresses()) + return; + + for (auto &F : *getModule()) { + runOnFunction(&F); } - // Set the SIL state before the PassManager has a chance to run + // Update the SILModule before the PassManager has a chance to run // verification. - getModule()->setStage(SILStage::Lowered); + getModule()->setLoweredAddresses(true); } SILTransform *swift::createAddressLowering() { return new AddressLowering(); } diff --git a/lib/SILOptimizer/PassManager/PassPipeline.cpp b/lib/SILOptimizer/PassManager/PassPipeline.cpp index 13eb1599d5601..f9c84763edc95 100644 --- a/lib/SILOptimizer/PassManager/PassPipeline.cpp +++ b/lib/SILOptimizer/PassManager/PassPipeline.cpp @@ -87,12 +87,14 @@ static void addModulePrinterPipeline(SILPassPipelinePlan &plan, static void addMandatoryDebugSerialization(SILPassPipelinePlan &P) { P.startPipeline("Mandatory Debug Serialization"); + P.addAddressLowering(); P.addOwnershipModelEliminator(); P.addMandatoryInlining(); } static void addOwnershipModelEliminatorPipeline(SILPassPipelinePlan &P) { P.startPipeline("Ownership Model Eliminator"); + P.addAddressLowering(); P.addOwnershipModelEliminator(); } @@ -113,6 +115,7 @@ static void addDefiniteInitialization(SILPassPipelinePlan &P) { static void addMandatoryDiagnosticOptPipeline(SILPassPipelinePlan &P) { P.startPipeline("Mandatory Diagnostic Passes + Enabling Optimization Passes"); P.addSILGenCleanup(); + P.addAddressLowering(); P.addDiagnoseInvalidEscapingCaptures(); P.addDiagnoseStaticExclusivity(); P.addNestedSemanticFunctionCheck(); @@ -796,11 +799,10 @@ static void addSILDebugInfoGeneratorPipeline(SILPassPipelinePlan &P) { SILPassPipelinePlan SILPassPipelinePlan::getLoweringPassPipeline(const SILOptions &Options) { SILPassPipelinePlan P(Options); - P.startPipeline("Address Lowering"); + P.startPipeline("Lowering"); P.addLowerHopToActor(); 
// FIXME: earlier for more opportunities? P.addOwnershipModelEliminator(); P.addIRGenPrepare(); - P.addAddressLowering(); return P; } @@ -913,7 +915,7 @@ SILPassPipelinePlan::getOnonePassPipeline(const SILOptions &Options) { // depend on other passes needed for diagnostics). Thus we can run them later // and avoid having SourceKit run these passes when just emitting diagnostics // in the editor. - P.startPipeline("non-Diagnostic Enabling Mandatory Optimizations"); + P.startPipeline("Non-Diagnostic Mandatory Optimizations"); P.addForEachLoopUnroll(); P.addMandatoryCombine(); diff --git a/lib/SILOptimizer/PassManager/Passes.cpp b/lib/SILOptimizer/PassManager/Passes.cpp index 04c4dc612fa90..05aba1c411abc 100644 --- a/lib/SILOptimizer/PassManager/Passes.cpp +++ b/lib/SILOptimizer/PassManager/Passes.cpp @@ -213,7 +213,7 @@ void swift::runSILLoweringPasses(SILModule &Module) { SILPassPipelinePlan::getLoweringPassPipeline(opts), /*isMandatory*/ true); - assert(Module.getStage() == SILStage::Lowered); + Module.setStage(SILStage::Lowered); } /// Registered briged pass run functions. From b90007a98c8df8abaddecd6e100a367d522e7e78 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sun, 28 Nov 2021 20:20:58 -0800 Subject: [PATCH 04/88] Update and reimplement AddressLowering pass (for SIL opaque values). Merge the AddressLowering pass from its old development branch and update it so we can begin incrementally enabling it under a flag. This has been reimplemented for simplicity. There's no point in looking at the old code. 
--- include/swift/SIL/SILBuilder.h | 7 +- .../Mandatory/AddressLowering.cpp | 3714 ++++++++++++----- lib/SILOptimizer/Mandatory/AddressLowering.h | 282 ++ lib/SILOptimizer/Mandatory/CMakeLists.txt | 1 + .../Mandatory/PhiStorageOptimizer.cpp | 237 ++ .../Mandatory/PhiStorageOptimizer.h | 51 + test/IRGen/opaque_values_irgen.sil | 6 +- test/SIL/Parser/opaque_values_parse.sil | 2 +- .../Serialization/opaque_values_serialize.sil | 2 +- .../opaque_use_verifier.sil | 2 +- test/SILOptimizer/address_lowering.sil | 1142 +++-- test/SILOptimizer/address_lowering_phi.sil | 440 ++ test/SILOptimizer/address_projection.sil | 444 -- test/SILOptimizer/copy_propagation_opaque.sil | 2 +- test/SILOptimizer/opaque_values_mandatory.sil | 17 +- test/SILOptimizer/opaque_values_opt.sil | 19 +- test/SILOptimizer/specialize_opaque.sil | 2 +- test/SILOptimizer/specialize_opaque_ossa.sil | 2 +- test/sil-passpipeline-dump/basic.test-sh | 2 +- 19 files changed, 4579 insertions(+), 1795 deletions(-) create mode 100644 lib/SILOptimizer/Mandatory/AddressLowering.h create mode 100644 lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp create mode 100644 lib/SILOptimizer/Mandatory/PhiStorageOptimizer.h create mode 100644 test/SILOptimizer/address_lowering_phi.sil delete mode 100644 test/SILOptimizer/address_projection.sil diff --git a/include/swift/SIL/SILBuilder.h b/include/swift/SIL/SILBuilder.h index 9a43437c39da0..5da1ae39cae7c 100644 --- a/include/swift/SIL/SILBuilder.h +++ b/include/swift/SIL/SILBuilder.h @@ -266,11 +266,10 @@ class SILBuilder { void clearInsertionPoint() { BB = nullptr; } /// setInsertionPoint - Set the insertion point. 
- void setInsertionPoint(SILBasicBlock *BB, SILBasicBlock::iterator InsertPt) { + void setInsertionPoint(SILBasicBlock *BB, SILBasicBlock::iterator insertPt) { this->BB = BB; - this->InsertPt = InsertPt; - if (InsertPt == BB->end()) - return; + this->InsertPt = insertPt; + assert(insertPt == BB->end() || insertPt->getParent() == BB); } /// setInsertionPoint - Set the insertion point to insert before the specified diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index de04331eb6c71..81efd270a1f6c 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -2,101 +2,114 @@ // // This source file is part of the Swift.org open source project // -// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // -// This pass lowers SILTypes. On completion, the SILType of every SILValue is -// its SIL storage type. A SIL storage type is always an address type for values -// that require indirect storage at the LLVM IR level. Consequently, this pass -// is required for IRGen. It is a mandatory IRGen preparation pass (not a -// diagnostic pass). -// -// In the following text, items marked "[REUSE]" only apply to the proposed -// storage reuse optimization, which is not currently implemented. -// -// ## State -// -// A `valueStorageMap` maps each opaque SIL value to its storage -// information containing: -// -// - An ordinal representing the position of this instruction. -// -// - [REUSE] The identifier of the storage object. An optimized storage object -// may have multiple disjoint lifetimes. A storage object may also have -// subobjects. 
Each subobject has its own live range. When considering -// liveness of the subobject, one must also consider liveness of the -// parent object. -// -// - If this is a subobject projection, refer back to the value whose -// storage object will be the parent that this storage address is a -// projection of. -// -// - The storage address for this subobject. -// -// ## Step #1: Map opaque values -// -// Populate `valueStorageMap` in forward order (RPO), giving each opaque value -// an ordinal position. -// -// [REUSE] Assign a storage identifier to each opaque value. Optionally optimize -// storage by assigning multiple values the same identifier. -// -// ## Step #2: Allocate storage -// -// In reverse order (PO), allocate the parent storage object for each opaque -// value. -// -// [REUSE] If storage has already been allocated for the current live range, -// then simply reuse it. -// -// If the value's use composes a parent object from this value, and use's -// storage can be projected from, then mark the value's storage as a projection -// from the use value. [REUSE] Also inherit the use's storage identifier, and -// add an interval to the live range with the current projection path. -// -// A use can be projected from if its allocation is available at (dominates) -// this value and using the same storage over the interval from this value to -// the use does not overlap with the existing live range. -// -// Checking interference requires checking all operands that have been marked as -// projections. In the case of block arguments, it means checking the terminator -// operands of all predecessor blocks. -// -// [REUSE] Rather than checking all value operands, each live range will contain -// a set of intervals. Each interval will be associated with a projection path. -// -// Opaque value's that are the root of all projection paths now have their -// `storageAddress` assigned to an `alloc_stack` or argument. 
Opaque value's -// that are projections do not yet have a `storageAddress`. -// -// ## Step #3. Rewrite opaque values -// -// In forward order (RPO), rewrite each opaque value definition, and all its -// uses. This generally involves creating a new `_addr` variant of the -// instruction and obtaining the storage address from the `valueStorageMap`. -// -// If this value's storage is a projection of the value defined by its composing -// use, then first generate instructions to materialize the projection. This is -// a recursive process starting with the root of the projection path. -// -// A projection path will be materialized once, for the leaf subobject. When -// this happens, the `storageAddress` will be assigned for any intermediate -// projection paths. When those values are rewritten, their `storageAddress` -// will already be available. -// +//===----------------------------------------------------------------------===// +/// +/// This pass removes "opaque SILValues" by translating them into addressable +/// memory locations such as a stack locations. This is mandatory for IRGen. +/// +/// Lowering to LLVM IR requires each SILValue's type to be a valid "SIL storage +/// type". Opaque SILValues have address-only types. Address-only values require +/// indirect storage in LLVM, so their SIL storage type must be an address type. +/// +/// This pass should not introduce any semantic copies. Guaranteed values always +/// reuse the borrowed value's storage. This means that we SIL cannot allow +/// guaranteed opaque uses unless they are projections of the definition. In +/// particular, borrowed structs, tuples, and enums of address-only types are +/// not allowed. +/// +/// When owned values are consumed by phis, multiple storage locations are +/// required to avoid interfering with other phi operands. However, the value +/// never needs to be live in multiple storage locations a once. 
When the value +/// is consumed by a phi, either it's own storage is coalesced with the phi +/// storage (they have the same address), or the value is bitwise moved into the +/// phi's storage. +/// +/// ## Step #1: Map opaque values +/// +/// Populate a map from each opaque SILValue to its ValueStorage in forward +/// order (RPO). Each opaque value is mapped to an ordinal ID representing the +/// storage. Storage locations can now be optimized by remapping the values. +/// +/// ## Step #2: Allocate storage +/// +/// In reverse order (PO), allocate the parent storage object for each opaque +/// value. +/// +/// If the value is a subobject extraction (struct_extract, tuple_extract, +/// open_existential_value, unchecked_enum_data), then mark the value's storage +/// as a projection from the def's storage. +/// +/// If the value's use composes a parent object from this value (struct, tuple, +/// enum), and the use's storage dominates this value, then mark the value's +/// storage as a projection into the use's storage. +/// +/// ValueStorage projections can be chained. A non-projection ValueStorage is +/// the root of a tree of projections. +/// +/// When allocating storage, each ValueStorage root has its `storageAddress` +/// assigned to an `alloc_stack` or an argument. Opaque values that are storage +/// projections are not mapped to a `storageAddress` at this point. That happens +/// during rewriting. +/// +/// After allocating storage for all non-phi opaque values, phi storage is +/// allocated. This is handled by a PhiStorageOptimizer that checks for +/// interference among the phi operands and reuses storage allocated to other +/// values. +/// +/// ## Step #3. Rewrite opaque values +/// +/// In forward order (RPO), rewrite each opaque value definition, and all its +/// uses. This generally involves creating a new `_addr` variant of the +/// instruction and obtaining the storage address from the `valueStorageMap`. 
+/// +/// If this value's storage is a def-projection (the value is used to compose an +/// aggregate), then first generate instructions to materialize the +/// projection. This is a recursive process starting with the root of the +/// projection path. +/// +/// A projection path will be materialized once for the leaf subobject. When +/// this happens, the `storageAddress` will be assigned for any intermediate +/// projection paths. When those values are rewritten, their `storageAddress` +/// will already be available. +/// +//===----------------------------------------------------------------------===// +/// +/// TODO: Much of the implementation complexity, including most of the general +/// helper routines, stems from handling calls with multiple return values as +/// tuples. Once those calls are properly represented as instructions with +/// multiple results, then the implementation complexity will fall away. See the +/// code tagged "TODO: Multi-Result". +/// +/// TODO: Some complexity stems from the SILPhiArgument type/opcode being used +/// for terminator results rather than phis. 
+/// //===----------------------------------------------------------------------===// #define DEBUG_TYPE "address-lowering" + +#include "PhiStorageOptimizer.h" +#include "swift/Basic/BlotSetVector.h" +#include "swift/Basic/Range.h" +#include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/DebugUtils.h" +#include "swift/SIL/OwnershipUtils.h" +#include "swift/SIL/PrettyStackTrace.h" +#include "swift/SIL/PrunedLiveness.h" #include "swift/SIL/SILArgument.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILVisitor.h" #include "swift/SILOptimizer/Analysis/PostOrderAnalysis.h" #include "swift/SILOptimizer/PassManager/Transforms.h" +#include "swift/SILOptimizer/Utils/BasicBlockOptUtils.h" #include "swift/SILOptimizer/Utils/InstOptUtils.h" +#include "swift/SILOptimizer/Utils/InstructionDeleter.h" +#include "swift/SILOptimizer/Utils/StackNesting.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SetVector.h" #include "llvm/Support/CommandLine.h" @@ -104,169 +117,406 @@ using namespace swift; using llvm::SmallSetVector; -using llvm::PointerIntPair; - -llvm::cl::opt - OptimizeOpaqueAddressLowering("optimize-opaque-address-lowering", - llvm::cl::init(false)); - -// Visit all call results. -// Stop when the visitor returns `false`. -static void visitCallResults(ApplySite apply, - llvm::function_ref visitor) { - // FIXME: this entire implementation only really works for ApplyInst. - auto applyInst = cast(apply); - if (applyInst->getType().is()) { - // TODO: MultiValueInstruction - for (auto *operand : applyInst->getUses()) { - if (auto extract = dyn_cast(operand->getUser())) - if (!visitor(extract)) - break; - } - } else - visitor(applyInst); + +/// Get a function's convention for Lowered SIL, even though the SIL stage is +/// still Canonical. 
+static SILFunctionConventions getLoweredFnConv(SILFunction *function) { + return SILFunctionConventions( + function->getLoweredFunctionType(), + SILModuleConventions::getLoweredAddressConventions( + function->getModule())); +} + +/// Get a call's function convention for Lowered SIL even though the SIL stage +/// is still Canonical. +static SILFunctionConventions getLoweredCallConv(ApplySite call) { + return SILFunctionConventions( + call.getSubstCalleeType(), + SILModuleConventions::getLoweredAddressConventions(call.getModule())); +} + +/// Invoke \p cleanup on all paths exiting a call. +static void +cleanupAfterCall(FullApplySite apply, + llvm::function_ref cleanup) { + switch (apply.getKind()) { + case FullApplySiteKind::ApplyInst: { + cleanup(std::next(apply.getInstruction()->getIterator())); + break; + } + case FullApplySiteKind::TryApplyInst: { + auto *tryApply = cast(apply.getInstruction()); + cleanup(tryApply->getNormalBB()->begin()); + cleanup(tryApply->getErrorBB()->begin()); + break; + } + case FullApplySiteKind::BeginApplyInst: { + // FIXME: Unimplemented + // + // This should be as simple as calling cleanup for all the end_applies. + llvm::report_fatal_error("Unimplemented coroutine"); + } + } } //===----------------------------------------------------------------------===// -// ValueStorageMap: Map Opaque/Resilient SILValues to abstract storage units. +// Multi-Result +// +// TODO: These helpers all compensate for the legacy representation of return +// values as tuples. Once calls are properly represented as multi-value +// instructions, this complexity all goes away. +// +// Calls are currently SILValues, but when the result type is a tuple, the call +// value does not represent a real value with storage. This is a bad situation +// for address lowering because there's no way to tell from any given value +// whether its legal to assign storage to that value. 
As a result, the +// implementation of call lowering doesn't fall out naturally from the algorithm +// that lowers values to storage. //===----------------------------------------------------------------------===// -namespace { -struct ValueStorage { - enum { IsProjectionMask = 0x1, IsRewrittenMask = 0x2 }; - PointerIntPair projectionAndFlags; - - /// The final address of this storage unit after rewriting the SIL. - /// For values linked to their own storage, this is set during storage - /// allocation. For projections, it is only set after instruction rewriting. - SILValue storageAddress; +/// If \p pseudoResult has multiple results, return the destructure. +static DestructureTupleInst *getCallMultiResult(SILValue pseudoResult) { + if (pseudoResult->getType().is()) { + if (auto *use = pseudoResult->getSingleUse()) + return cast(use->getUser()); - bool isProjection() const { - return projectionAndFlags.getInt() & IsProjectionMask; + assert(pseudoResult->use_empty() && "pseudo result can't be used"); } - /// Return the operand the composes an aggregate from this value. - Operand *getComposedOperand() const { - assert(isProjection()); - return projectionAndFlags.getPointer(); + return nullptr; +} + +/// \p destructure is the pseudo result of a multi-result call. +/// Visit all real call results. Stop when the visitor returns `false`. +static bool visitCallMultiResults( + DestructureTupleInst *destructure, SILFunctionConventions fnConv, + llvm::function_ref visitor) { + assert(fnConv.getNumDirectSILResults() == destructure->getNumResults()); + + auto resultIter = destructure->getAllResults().begin(); + for (auto resultInfo : fnConv.getDirectSILResults()) { + if (!visitor(*resultIter++, resultInfo)) + return false; } - void setComposedOperand(Operand *oper) { - projectionAndFlags.setPointer(oper); - projectionAndFlags.setInt(projectionAndFlags.getInt() | IsProjectionMask); + return true; +} + +/// Visit all real call results. Stop when the visitor returns `false`. 
+static bool +visitCallResults(FullApplySite apply, + llvm::function_ref visitor) { + auto fnConv = apply.getSubstCalleeConv(); + SILValue pseudoResult = apply.getPseudoResult(); + if (auto *destructure = getCallMultiResult(pseudoResult)) { + return visitCallMultiResults(destructure, fnConv, visitor); } + return visitor(pseudoResult, *fnConv.getDirectSILResults().begin()); +} - bool isRewritten() const { - if (projectionAndFlags.getInt() & IsRewrittenMask) { - assert(storageAddress); - return true; - } +/// Return true if the given value is either a "fake" tuple that represents all +/// of a call's results or an empty tuple of no results. This may return true +/// for either tuple_inst or a block argument. +static bool isPseudoCallResult(SILValue value) { + if (isa(value)) + return value->getType().is(); + + auto *bbArg = dyn_cast(value); + if (!bbArg) return false; + + auto *term = bbArg->getTerminatorForResult(); + if (!term) + return false; + + return isa(term) && bbArg->getType().is(); +} + +/// Return true if this is a pseudo-return value. +static bool isPseudoReturnValue(SILValue value) { + if (auto *tuple = dyn_cast(value)) { + Operand *singleUse = tuple->getSingleUse(); + return singleUse && isa(singleUse->getUser()); } - void markRewritten() { - projectionAndFlags.setInt(projectionAndFlags.getInt() | IsRewrittenMask); - } -}; + return false; +} -/// Map each opaque/resilient SILValue to its abstract storage. -/// O(1) membership test. -/// O(n) iteration in RPO order. -class ValueStorageMap { - typedef std::vector> ValueVector; - // Hash of values to ValueVector indices. - typedef llvm::DenseMap ValueHashMap; +/// Return the value representing storage of an address-only or indirectly +/// returned tuple element. For real tuples, return the tuple value itself. If +/// the tuple is a pseudo-return value, return the indirect function argument +/// for the corresponding result after lowering. +/// +/// bb0(%loweredIndirectResult : $*T, ...) +/// .... 
+/// %tuple = tuple(..., %operand, ...) +/// return %tuple +/// +/// When called on %operand, return %loweredIndirectResult. +/// +/// Precondition: \p operand's user is a TupleInst +/// +/// Precondition: indirect function arguments have already been rewritten +/// (see insertIndirectReturnArgs()). +static SILValue getTupleStorageValue(Operand *operand) { + auto *tuple = cast(operand->getUser()); + Operand *singleUse = tuple->getSingleUse(); + if (!singleUse || !isa(singleUse->getUser())) + return tuple; + + unsigned resultIdx = tuple->getElementIndex(operand); + + SILFunction *function = tuple->getFunction(); + auto loweredFnConv = getLoweredFnConv(function); + assert(loweredFnConv.getResults().size() == tuple->getElements().size()); + + unsigned indirectResultIdx = 0; + for (SILResultInfo result : loweredFnConv.getResults().slice(0, resultIdx)) { + if (loweredFnConv.isSILIndirect(result)) + ++indirectResultIdx; + } + // Cannot call F->getIndirectSILResults here because that API uses the + // function conventions before address lowering. + return function->getArguments()[indirectResultIdx]; +} - ValueVector valueVector; - ValueHashMap valueHashMap; +/// Return the value representing storage for a single return value. +/// +/// bb0(%loweredIndirectResult : $*T, ...) +/// return %oper +/// +/// For %oper, return %loweredIndirectResult +static SILValue getSingleReturnValue(Operand *operand) { + assert(!isPseudoReturnValue(operand->get())); + + auto *function = operand->getParentFunction(); + auto loweredFnConv = getLoweredFnConv(function); + assert(loweredFnConv.getNumIndirectSILResults() == 1); + (void)loweredFnConv; + + // Cannot call getIndirectSILResults here because that API uses the + // function conventions before address lowering. 
+ return function->getArguments()[0]; +} -public: - bool empty() const { return valueVector.empty(); } +//===----------------------------------------------------------------------===// +// ValueStorageMap +// +// Map Opaque SILValues to abstract storage units. +//===----------------------------------------------------------------------===// - void clear() { - valueVector.clear(); - valueHashMap.clear(); - } +/// Check if this is a copy->store pair. If so, the copy storage will be +/// projected from the source, and the copy semantics will be handled by +/// UseRewriter::visitStoreInst. +static bool isStoreCopy(SILValue value) { + auto *copyInst = dyn_cast(value); + if (!copyInst) + return false; - ValueVector::iterator begin() { return valueVector.begin(); } + if (!copyInst->hasOneUse()) + return false; - ValueVector::iterator end() { return valueVector.end(); } + auto *user = value->getSingleUse()->getUser(); + return isa(user) || isa(user); +} - ValueVector::reverse_iterator rbegin() { return valueVector.rbegin(); } +ValueStorage &ValueStorageMap::insertValue(SILValue value) { + assert(!stableStorage && "cannot grow stable storage map"); - ValueVector::reverse_iterator rend() { return valueVector.rend(); } + auto hashResult = + valueHashMap.insert(std::make_pair(value, valueVector.size())); + (void)hashResult; + assert(hashResult.second && "SILValue already mapped"); - bool contains(SILValue value) const { - return valueHashMap.find(value) != valueHashMap.end(); - } + valueVector.emplace_back(value, ValueStorage()); - unsigned getOrdinal(SILValue value) { - auto hashIter = valueHashMap.find(value); - assert(hashIter != valueHashMap.end() && "Missing SILValue"); - return hashIter->second; - } + return valueVector.back().storage; +} - ValueStorage &getStorage(SILValue value) { - return valueVector[getOrdinal(value)].second; - } +void ValueStorageMap::replaceValue(SILValue oldValue, SILValue newValue) { + auto pos = valueHashMap.find(oldValue); + assert(pos != 
valueHashMap.end()); + unsigned ordinal = pos->second; + valueHashMap.erase(pos); - // This must be called in RPO order. - ValueStorage &insertValue(SILValue value) { - auto hashResult = - valueHashMap.insert(std::make_pair(value, valueVector.size())); - (void)hashResult; - assert(hashResult.second && "SILValue already mapped"); + auto hashResult = valueHashMap.insert(std::make_pair(newValue, ordinal)); + (void)hashResult; + assert(hashResult.second && "SILValue already mapped"); - valueVector.emplace_back(value, ValueStorage()); + valueVector[ordinal].value = newValue; +} - return valueVector.back().second; +void ValueStorageMap::dump() { + llvm::dbgs() << "ValueStorageMap:\n"; + for (unsigned ordinal : indices(valueVector)) { + auto &valStoragePair = valueVector[ordinal]; + llvm::dbgs() << "value: "; + valStoragePair.value->dump(); + auto &storage = valStoragePair.storage; + if (storage.isUseProjection) { + llvm::dbgs() << " use projection: "; + if (!storage.isRewritten) + valueVector[storage.projectedStorageID].value->dump(); + } else if (storage.isDefProjection) { + llvm::dbgs() << " def projection: "; + if (!storage.isRewritten) + valueVector[storage.projectedStorageID].value->dump(); + } + if (storage.storageAddress) { + llvm::dbgs() << " storage: "; + storage.storageAddress->dump(); + } } -}; -} // end anonymous namespace +} //===----------------------------------------------------------------------===// -// AddressLoweringState: shared state for the pass's analysis and transforms. +// AddressLoweringState +// +// Shared state for the pass's analysis and transforms. //===----------------------------------------------------------------------===// namespace { +class PhiRewriter; + struct AddressLoweringState { - SILFunction *F; + SILFunction *function; SILFunctionConventions loweredFnConv; // Dominators remain valid throughout this pass. DominanceInfo *domInfo; - // All opaque values and associated storage. 
+ InstructionDeleter deleter; + + // All opaque values mapped to their associated storage. ValueStorageMap valueStorageMap; + // All call sites with formally indirect SILArgument or SILResult conventions. - // Calls are removed from the set when rewritten. - SmallSetVector indirectApplies; + // + // Applies with indirect results are removed as they are rewritten. Applies + // with only indirect arguments are rewritten in a post-pass, only after all + // parameters are rewritten. + SmallBlotSetVector indirectApplies; + // All function-exiting terminators (return or throw instructions). - SmallVector returnInsts; - // Delete these instructions after performing transformations. - // They must not have any remaining users. - SmallSetVector instsToDelete; - - AddressLoweringState(SILFunction *F, DominanceInfo *domInfo) - : F(F), - loweredFnConv(F->getLoweredFunctionType(), - SILModuleConventions::getLoweredAddressConventions(F->getModule())), + SmallVector exitingInsts; + + // Copies from a phi's operand storage to the phi storage. These logically + // occur on the CFG edge. Keep track of them to resolve anti-dependencies. + std::unique_ptr phiRewriter; + + AddressLoweringState(SILFunction *function, DominanceInfo *domInfo) + : function(function), loweredFnConv(getLoweredFnConv(function)), domInfo(domInfo) {} - bool isDead(SILInstruction *inst) const { return instsToDelete.count(inst); } + SILModule *getModule() const { return &function->getModule(); } - void markDead(SILInstruction *inst) { -#ifndef NDEBUG - for (auto result : inst->getResults()) - for (Operand *use : result->getUses()) - assert(instsToDelete.count(use->getUser())); -#endif - instsToDelete.insert(inst); + SILLocation genLoc() const { + return RegularLocation::getAutoGeneratedLocation(); + } + + // Get a builder that uses function conventions for the Lowered SIL stage even + // though the SIL stage hasn't explicitly changed yet. 
+ SILBuilder getBuilder(SILBasicBlock::iterator insertPt) const { + return getBuilder(insertPt, &*insertPt); + } + SILBuilder getTermBuilder(TermInst *term) const { + return getBuilder(term->getParent()->end(), term); + } + + PhiRewriter &getPhiRewriter(); + + SILValue getMaterializedAddress(SILValue origValue) const { + return valueStorageMap.getStorage(origValue).getMaterializedAddress(); + } + +protected: + SILBuilder getBuilder(SILBasicBlock::iterator insertPt, + SILInstruction *originalInst) const { + SILBuilder builder(originalInst->getParent(), insertPt); + builder.setSILConventions( + SILModuleConventions::getLoweredAddressConventions( + builder.getModule())); + builder.setCurrentDebugScope(originalInst->getDebugScope()); + return builder; } }; } // end anonymous namespace //===----------------------------------------------------------------------===// -// OpaqueValueVisitor: Map OpaqueValues to ValueStorage. +// OpaqueValueVisitor +// +// Map opaque values to ValueStorage. //===----------------------------------------------------------------------===// +/// Before populating the ValueStorageMap, replace each value-typed argument to +/// the current function with an address-typed argument by inserting a temporary +/// load instruction. +static void convertIndirectFunctionArgs(AddressLoweringState &pass) { + // Insert temporary argument loads at the top of the function. 
+ SILBuilder argBuilder = + pass.getBuilder(pass.function->getEntryBlock()->begin()); + + auto fnConv = pass.function->getConventions(); + unsigned argIdx = fnConv.getSILArgIndexOfFirstParam(); + for (SILParameterInfo param : + pass.function->getLoweredFunctionType()->getParameters()) { + + if (param.isFormalIndirect() && !fnConv.isSILIndirect(param)) { + SILArgument *arg = pass.function->getArgument(argIdx); + SILType addrType = arg->getType().getAddressType(); + LoadInst *loadArg = argBuilder.createTrivialLoadOr( + SILValue(arg).getLoc(), SILUndef::get(addrType, *pass.function), + LoadOwnershipQualifier::Take); + + arg->replaceAllUsesWith(loadArg); + assert(!pass.valueStorageMap.contains(arg)); + + arg = arg->getParent()->replaceFunctionArgument( + arg->getIndex(), addrType, OwnershipKind::None, arg->getDecl()); + + loadArg->setOperand(arg); + + // Indirect calling convention may be used for loadable types. In that + // case, generating the argument loads is sufficient. + if (addrType.isAddressOnly(*pass.function)) { + auto &storage = pass.valueStorageMap.insertValue(loadArg); + storage.storageAddress = arg; + storage.isRewritten = true; + } + } + ++argIdx; + } + assert(argIdx + == fnConv.getSILArgIndexOfFirstParam() + fnConv.getNumSILArguments()); +} + +/// Before populating the ValueStorageMap, insert function arguments for any +/// @out result type. Return the number of indirect result arguments added. 
+static unsigned insertIndirectReturnArgs(AddressLoweringState &pass) { + auto &astCtx = pass.getModule()->getASTContext(); + auto typeCtx = pass.function->getTypeExpansionContext(); + auto *declCtx = pass.function->getDeclContext(); + + unsigned argIdx = 0; + for (auto resultTy : pass.loweredFnConv.getIndirectSILResultTypes(typeCtx)) { + auto bodyResultTy = pass.function->mapTypeIntoContext(resultTy); + auto var = new (astCtx) ParamDecl( + SourceLoc(), SourceLoc(), astCtx.getIdentifier("$return_value"), + SourceLoc(), astCtx.getIdentifier("$return_value"), declCtx); + + SILFunctionArgument *funcArg = + pass.function->begin()->insertFunctionArgument( + argIdx, bodyResultTy.getAddressType(), OwnershipKind::None, var); + // Insert function results into valueStorageMap so that the caller storage + // can be projected onto values inside the function as use projections. + auto &storage = pass.valueStorageMap.insertValue(funcArg); + // This is the only case where a value defines its own storage. + storage.storageAddress = funcArg; + storage.isRewritten = true; + + ++argIdx; + } + assert(argIdx == pass.loweredFnConv.getNumIndirectSILResults()); + return argIdx; +} + namespace { /// Collect all opaque/resilient values, inserting them in `valueStorageMap` in /// RPO order. @@ -282,46 +532,53 @@ class OpaqueValueVisitor { public: explicit OpaqueValueVisitor(AddressLoweringState &pass) - : pass(pass), postorderInfo(pass.F) {} + : pass(pass), postorderInfo(pass.function) {} void mapValueStorage(); protected: - void visitApply(ApplySite applySite); + void checkForIndirectApply(FullApplySite applySite); void visitValue(SILValue value); + void canonicalizeReturnValues(); }; } // end anonymous namespace -/// Top-level entry: Populate `valueStorageMap`, `indirectResults`, and -/// `indirectOperands`. +/// Top-level entry. Populates AddressLoweringState's `valueStorageMap`, +/// `indirectApplies`, and `exitingInsts`. 
/// /// Find all Opaque/Resilient SILValues and add them /// to valueStorageMap in RPO. void OpaqueValueVisitor::mapValueStorage() { - for (auto *BB : postorderInfo.getReversePostOrder()) { - if (BB->getTerminator()->isFunctionExiting()) - pass.returnInsts.push_back(BB->getTerminator()); + for (auto *block : postorderInfo.getReversePostOrder()) { + if (block->getTerminator()->isFunctionExiting()) + pass.exitingInsts.push_back(block->getTerminator()); // Opaque function arguments have already been replaced. - if (BB != pass.F->getEntryBlock()) { - for (auto argI = BB->args_begin(), argEnd = BB->args_end(); - argI != argEnd; ++argI) { - visitValue(*argI); + if (block != pass.function->getEntryBlock()) { + for (auto *arg : block->getArguments()) { + if (isPseudoCallResult(arg)) + continue; + + visitValue(arg); } } - for (auto &II : *BB) { - if (auto apply = ApplySite::isa(&II)) - visitApply(apply); + for (auto &inst : *block) { + if (auto apply = FullApplySite::isa(&inst)) + checkForIndirectApply(apply); + + for (auto result : inst.getResults()) { + if (isPseudoCallResult(result) || isPseudoReturnValue(result)) + continue; - for (auto result : II.getResults()) visitValue(result); + } } } + canonicalizeReturnValues(); } -/// Populate `indirectApplies` and insert this apply in `valueStorageMap` if -/// the call's non-tuple result is returned indirectly. -void OpaqueValueVisitor::visitApply(ApplySite applySite) { +/// Populate `indirectApplies`. 
+void OpaqueValueVisitor::checkForIndirectApply(FullApplySite applySite) { auto calleeConv = applySite.getSubstCalleeConv(); unsigned calleeArgIdx = applySite.getCalleeArgIndexOfFirstAppliedArg(); for (Operand &operand : applySite.getArgumentOperands()) { @@ -329,265 +586,602 @@ void OpaqueValueVisitor::visitApply(ApplySite applySite) { auto argConv = calleeConv.getSILArgumentConvention(calleeArgIdx); if (argConv.isIndirectConvention()) { pass.indirectApplies.insert(applySite); + return; } } ++calleeArgIdx; } - - if (applySite.getSubstCalleeType()->hasIndirectFormalResults()) { + if (applySite.getSubstCalleeType()->hasIndirectFormalResults()) pass.indirectApplies.insert(applySite); - if (!applySite.getType().is()) - pass.valueStorageMap.insertValue(cast(applySite)); - - return; - } } -/// If `value` is address-only add it to the `valueStorageMap`. +/// If `value` is address-only, add it to the `valueStorageMap`. void OpaqueValueVisitor::visitValue(SILValue value) { - if (value->getType().isObject() - && value->getType().isAddressOnly(*pass.F)) { - if (pass.valueStorageMap.contains(value)) { - assert(isa( - pass.valueStorageMap.getStorage(value).storageAddress)); - return; - } - pass.valueStorageMap.insertValue(value); + if (!value->getType().isObject() + || !value->getType().isAddressOnly(*pass.function)) { + return; + } + if (pass.valueStorageMap.contains(value)) { + // Function arguments are already mapped from loads. + assert(isa( + pass.valueStorageMap.getStorage(value).storageAddress)); + return; } + pass.valueStorageMap.insertValue(value); } -//===----------------------------------------------------------------------===// -// OpaqueStorageAllocation: Generate alloc_stack and address projections for all -// abstract storage locations. -//===----------------------------------------------------------------------===// - -namespace { -/// Allocate storage on the stack for every opaque value defined in this -/// function in RPO order. 
If the definition is an argument of this function, -/// simply replace the function argument with an address representing the -/// caller's storage. -/// -/// TODO: shrink lifetimes by inserting alloc_stack at the dominance LCA and -/// finding the lifetime boundary with a simple backward walk from uses. -class OpaqueStorageAllocation { - AddressLoweringState &pass; +// Canonicalize returned values. +// +// Given: +// %t = def : $(T, T) +// use %t : $(T, T) +// return %t : $(T, T) +// +// Produce: +// %t = def +// use %t : $(T, T) +// (%e0, %e1) = destructure_tuple %t : $(T, T) +// %r = tuple (%e0 : $T, %e1 : $T) +// return %r : $(T, T) +// +// TODO: Multi-Result. This should be a standard OSSA canonicalization until +// returns are fixed to take multiple operands. +void OpaqueValueVisitor::canonicalizeReturnValues() { + auto numResults = pass.function->getConventions().getNumDirectSILResults(); + if (numResults < 2) + return; -public: - explicit OpaqueStorageAllocation(AddressLoweringState &pass) : pass(pass) {} + for (SILInstruction *termInst : pass.exitingInsts) { + auto *returnInst = dyn_cast(termInst); + if (!returnInst) { + assert(isa(termInst)); + continue; + } + SILValue oldResult = returnInst->getOperand(); + if (oldResult.getOwnershipKind() != OwnershipKind::Owned) + continue; - void allocateOpaqueStorage(); + assert(oldResult->getType().is()); + if (oldResult->hasOneUse()) { + assert(isPseudoReturnValue(oldResult)); + continue; + } + // There is another nonconsuming use of the returned tuple. + SILBuilderWithScope returnBuilder(returnInst); + auto loc = pass.genLoc(); + auto *destructure = returnBuilder.createDestructureTuple(loc, oldResult); + + SmallVector results; + results.reserve(numResults); + for (auto result : destructure->getResults()) { + // Update the value storage map for new instructions. Since they are + // created at function exits, they are naturally in RPO order. 
+ this->visitValue(result); + results.push_back(result); + } + auto *newResult = returnBuilder.createTuple( + pass.genLoc(), oldResult->getType(), results, OwnershipKind::Owned); + returnInst->setOperand(newResult); -protected: - void convertIndirectFunctionArgs(); - unsigned insertIndirectReturnArgs(); - bool canProjectFrom(SingleValueInstruction *innerVal, - SILInstruction *composingUse); - void allocateForValue(SILValue value, ValueStorage &storage); -}; -} // end anonymous namespace + assert(isPseudoReturnValue(newResult)); + } +} -/// Top-level entry point: allocate storage for all opaque/resilient values. -void OpaqueStorageAllocation::allocateOpaqueStorage() { +/// Top-level entry point. +/// +/// Prepare the SIL by rewriting function arguments and returns. +/// Initialize the ValueStorageMap with an entry for each opaque value in the +/// function. +static void prepareValueStorage(AddressLoweringState &pass) { // Fixup this function's argument types with temporary loads. - convertIndirectFunctionArgs(); + convertIndirectFunctionArgs(pass); // Create a new function argument for each indirect result. - insertIndirectReturnArgs(); + insertIndirectReturnArgs(pass); // Populate valueStorageMap. OpaqueValueVisitor(pass).mapValueStorage(); - - // Create an AllocStack for every opaque value defined in the function. Visit - // values in post-order to create storage for aggregates before subobjects. - for (auto &valueStorageI : llvm::reverse(pass.valueStorageMap)) - allocateForValue(valueStorageI.first, valueStorageI.second); } -/// Replace each value-typed argument to the current function with an -/// address-typed argument by inserting a temporary load instruction. -void OpaqueStorageAllocation::convertIndirectFunctionArgs() { - // Insert temporary argument loads at the top of the function. 
- SILBuilder argBuilder(pass.F->getEntryBlock()->begin()); - argBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(pass.F->getModule())); - - auto fnConv = pass.F->getConventions(); - unsigned argIdx = fnConv.getSILArgIndexOfFirstParam(); - for (SILParameterInfo param : - pass.F->getLoweredFunctionType()->getParameters()) { - - if (param.isFormalIndirect() && !fnConv.isSILIndirect(param)) { - SILArgument *arg = pass.F->getArgument(argIdx); - SILType addrType = arg->getType().getAddressType(); +//===----------------------------------------------------------------------===// +// Storage Projection +// +// These queries determine whether storage for a SILValue can be projected from +// its operands or into its uses. +// ===---------------------------------------------------------------------===// - LoadInst *loadArg = argBuilder.createLoad( - RegularLocation(const_cast(arg->getDecl())), - SILUndef::get(addrType, *pass.F), - LoadOwnershipQualifier::Unqualified); +/// Return the operand whose source is an aggregate value that is extracted +/// into the given subobject, \p value. Or return nullptr. +/// +/// Def-projection oracle: The answer must be consistent across both +/// OpaqueStorageAllocation and AddressMaterialization. +/// +/// Invariant: +/// `getProjectedDefOperand(value) != nullptr` +/// if-and-only-if +/// `pass.valueStorageMap.getStorage(value).isDefProjection` +/// +/// Invariant: if \p value has guaranteed ownership, this must return a nonnull +/// value. 
+static Operand *getProjectedDefOperand(SILValue value) { + switch (value->getKind()) { + default: + return nullptr; + + case ValueKind::BeginBorrowInst: + return &cast(value)->getOperandRef(); + + case ValueKind::CopyValueInst: + if (isStoreCopy(value)) + return &cast(value)->getOperandRef(); + + return nullptr; + + case ValueKind::MultipleValueInstructionResult: { + SILInstruction *destructure = + cast(value)->getParent(); + switch (destructure->getKind()) { + default: + return nullptr; + case SILInstructionKind::DestructureStructInst: + return &destructure->getOperandRef(0); + case SILInstructionKind::DestructureTupleInst: { + auto *oper = &destructure->getOperandRef(0); + if (isPseudoCallResult(oper->get())) + return nullptr; + + return oper; + } + } + } + case ValueKind::TupleExtractInst: { + auto *TEI = cast(value); + // TODO: Multi-Result: TupleExtract from an apply are handled specially + // until we have multi-result calls. Force them to allocate storage. + if (ApplySite::isa(TEI->getOperand())) + return nullptr; - arg->replaceAllUsesWith(loadArg); - assert(!pass.valueStorageMap.contains(arg)); + LLVM_FALLTHROUGH; + } + case ValueKind::StructExtractInst: + case ValueKind::OpenExistentialValueInst: + case ValueKind::OpenExistentialBoxValueInst: + assert(value.getOwnershipKind() == OwnershipKind::Guaranteed); + return &cast(value)->getAllOperands()[0]; + } +} - arg = arg->getParent()->replaceFunctionArgument( - arg->getIndex(), addrType, OwnershipKind::None, arg->getDecl()); +/// Return the operand of the reused storage. These operations are always +/// rewritten by the use rewriter and destructively reuse their operand's +/// storage. If the result is address-only, then the operand must be +/// address-only (otherwise, the operand would not necessarily have storage).
+static Operand *getReusedStorageOperand(SILValue value) { + switch (value->getKind()) { + default: + break; - loadArg->setOperand(arg); + case ValueKind::OpenExistentialValueInst: + case ValueKind::OpenExistentialBoxValueInst: + case ValueKind::UncheckedEnumDataInst: + return &cast(value)->getOperandRef(0); - if (addrType.isAddressOnly(*pass.F)) - pass.valueStorageMap.insertValue(loadArg).storageAddress = arg; + case ValueKind::SILPhiArgument: { + if (auto *term = cast(value)->getTerminatorForResult()) { + if (auto *switchEnum = dyn_cast(term)) { + return &switchEnum->getAllOperands()[0]; + } } + break; + } - ++argIdx; } - assert(argIdx - == fnConv.getSILArgIndexOfFirstParam() + fnConv.getNumSILArguments()); -} - -/// Insert function arguments for any @out result type. Return the number of -/// indirect result arguments added. -unsigned OpaqueStorageAllocation::insertIndirectReturnArgs() { - auto &ctx = pass.F->getModule().getASTContext(); - unsigned argIdx = 0; - for (auto resultTy : pass.loweredFnConv.getIndirectSILResultTypes( - pass.F->getTypeExpansionContext())) { - auto bodyResultTy = pass.F->mapTypeIntoContext(resultTy); - auto var = new (ctx) - ParamDecl(SourceLoc(), SourceLoc(), - ctx.getIdentifier("$return_value"), SourceLoc(), - ctx.getIdentifier("$return_value"), - pass.F->getDeclContext()); - var->setSpecifier(ParamSpecifier::InOut); - - pass.F->begin()->insertFunctionArgument( - argIdx, bodyResultTy.getAddressType(), OwnershipKind::None, var); - ++argIdx; } - assert(argIdx == pass.loweredFnConv.getNumIndirectSILResults()); - return argIdx; + return nullptr; +} - -/// Is this operand composing an aggregate from a subobject, or simply -/// forwarding the operand's value to storage defined elsewhere? +/// If \p operand can project into its user, return the SILValue representing +/// user's storage. The user may compose an aggregate from its operands or +/// forward its operands to arguments. /// -/// TODO: Handle struct. -/// TODO: Make this a visitor.
-bool OpaqueStorageAllocation::canProjectFrom(SingleValueInstruction *innerVal, - SILInstruction *composingUse) { - if (!OptimizeOpaqueAddressLowering) - return false; - - SILValue composingValue; - switch (composingUse->getKind()) { +/// TODO: Handle SwitchValueInst, CheckedCastValueBranchInst. +static SILValue getProjectedUseValue(Operand *operand) { + auto *user = operand->getUser(); + switch (user->getKind()) { default: - return false; - case SILInstructionKind::ApplyInst: - // @in operands never need their own storage since they are non-mutating - // uses. They simply reuse the storage allocated for their operand. So it - // wouldn't make sense to "project" out of the apply argument. - return false; - case SILInstructionKind::EnumInst: - composingValue = cast(composingUse); - break; - case SILInstructionKind::InitExistentialValueInst: { - // Ensure that all opened archetypes are available at the inner value's - // definition. - auto *initExistential = cast(composingUse); - for (Operand &operand : initExistential->getTypeDependentOperands()) { - if (!pass.domInfo->properlyDominates(operand.get(), innerVal)) - return false; - } - composingValue = initExistential; break; - } + + // structs and enums are straightforward compositions. + case SILInstructionKind::StructInst: + case SILInstructionKind::EnumInst: + return cast(user); + + // init_existential_value composes an existential value, but may depend on + // opened archetypes. The caller will need to check that storage dominates + // the opened types. + case SILInstructionKind::InitExistentialValueInst: + return cast(user); + + // A tuple is either a composition or forwards its element through a return + // through function argument storage. Either way, its element can be a + // use projection. + case SILInstructionKind::TupleInst: + return getTupleStorageValue(operand); + + // Return instructions can project into the return value.
case SILInstructionKind::ReturnInst: - return true; - case SILInstructionKind::StoreInst: { - if (cast(composingUse)->getSrc() == innerVal - && isa(innerVal)) { - return true; - } + return getSingleReturnValue(operand); + } + return SILValue(); +} + +//===----------------------------------------------------------------------===// +// OpaqueStorageAllocation +// +// Generate alloc_stack and address projections for abstract storage locations. +// ===---------------------------------------------------------------------===// + +// Record a storage projection from the source of the given operand into its +// use (e.g. struct_extract, tuple_extract, switch_enum). +void ValueStorageMap::recordDefProjection(Operand *oper, + SILValue projectedValue) { + auto &storage = getStorage(projectedValue); + storage.projectedStorageID = getOrdinal(oper->get()); + storage.isDefProjection = true; +} + +// Mark this operand as coalesced with \p useValue storage. +void ValueStorageMap::recordComposingUseProjection(Operand *oper, + SILValue userValue) { + auto &storage = getStorage(oper->get()); + assert(!storage.isAllocated()); + storage.projectedStorageID = getOrdinal(userValue); + storage.projectedOperandNum = oper->getOperandNumber(); + storage.isUseProjection = true; + + if (EnumDecl *enumDecl = userValue->getType().getEnumOrBoundGenericEnum()) { + storage.initializesEnum = true; + } + assert(!storage.isPhiProjection()); +} + +// Mark this phi operand as coalesced with the phi storage. 
+void ValueStorageMap::recordPhiUseProjection(Operand *operand, + SILPhiArgument *phi) { + assert(isa(operand->getUser())); + + auto &storage = getStorage(operand->get()); + assert(!storage.isAllocated()); + assert(storage.projectedOperandNum == ValueStorage::InvalidOper); + + storage.projectedStorageID = getOrdinal(phi); + storage.isUseProjection = true; + + assert(storage.isPhiProjection()); +} + +bool ValueStorageMap::isComposingUseProjection(Operand *oper) const { + auto hashPos = valueHashMap.find(oper->get()); + if (hashPos == valueHashMap.end()) + return false; + + auto &srcStorage = valueVector[hashPos->second].storage; + if (!srcStorage.isUseProjection) return false; + + return srcStorage.projectedOperandNum == oper->getOperandNumber(); +} + +namespace { +/// Allocate storage on the stack for every opaque value defined in this +/// function in postorder. If the definition is an argument of this function, +/// simply replace the function argument with an address representing the +/// caller's storage. +/// +/// TODO: shrink lifetimes by inserting alloc_stack at the dominance LCA and +/// finding the lifetime boundary with a simple backward walk from uses. 
+class OpaqueStorageAllocation { + AddressLoweringState &pass; + +public: + explicit OpaqueStorageAllocation(AddressLoweringState &pass) : pass(pass) {} + + void allocateOpaqueStorage(); + +protected: + void allocateValue(SILValue value); + bool findProjectionIntoUseImpl(SILValue value, + ArrayRef incomingValues, + bool intoPhi); + + bool findValueProjectionIntoUse(SILValue value) { + return findProjectionIntoUseImpl(value, ArrayRef(value), false); } - case SILInstructionKind::TupleInst: - composingValue = cast(composingUse); - break; + + bool findPhiProjectionIntoUse(SILValue value, + ArrayRef incomingValues) { + return findProjectionIntoUseImpl(value, incomingValues, true); } - ValueStorage &storage = pass.valueStorageMap.getStorage(composingValue); - if (SILValue addr = storage.storageAddress) { - if (auto *stackInst = dyn_cast(addr)) { - assert(pass.domInfo->properlyDominates(stackInst, innerVal)); - return true; - } - if (isa(addr)) { - return true; - } - } else if (storage.isProjection()) - return canProjectFrom(innerVal, storage.getComposedOperand()->getUser()); - return false; + bool checkStorageDominates(AllocStackInst *allocInst, + ArrayRef incomingValues); + + void allocatePhi(PhiValue phi); + + void removeAllocation(SILValue value); + + AllocStackInst *createStackAllocation(SILValue value); + + void createStackAllocationStorage(SILValue value) { + pass.valueStorageMap.getStorage(value).storageAddress = + createStackAllocation(value); + } +}; +} // end anonymous namespace + +/// Top-level entry point: allocate storage for all opaque/resilient values. +void OpaqueStorageAllocation::allocateOpaqueStorage() { + // Create an AllocStack for every opaque value defined in the function. Visit + // values in post-order to create storage for aggregates before subobjects. 
+ for (auto &valueStorageI : llvm::reverse(pass.valueStorageMap)) { + SILValue value = valueStorageI.value; + if (!PhiValue(value)) + allocateValue(value); + } + // Only allocate phis after all SSA values have been allocated. allocateValue + // assumes SSA form without checking interference. At that point, multiple + // SILValues can share storage via projections, but the storage is still + // singly defined. However, allocatePhi may coalesce multiple values, or even + // a single value across multiple loop iterations. The burden for checking + // interference is entirely on allocatePhi. + for (auto &valueStorageI : llvm::reverse(pass.valueStorageMap)) { + if (auto phi = PhiValue(valueStorageI.value)) { + allocatePhi(phi); + } + } } /// Allocate storage for a single opaque/resilient value. -void OpaqueStorageAllocation::allocateForValue(SILValue value, - ValueStorage &storage) { +void OpaqueStorageAllocation::allocateValue(SILValue value) { + // Phis must be deferred. + assert(!PhiValue(value)); + + // Pseudo call results have no storage. + assert(!isPseudoCallResult(value)); + + // Pseudo return values have no storage. + assert(!isPseudoReturnValue(value)); + + auto &storage = pass.valueStorageMap.getStorage(value); + + // Fake loads for incoming function arguments are already rewritten; so are + // outgoing function arguments. + if (storage.isRewritten) + return; + + // Function arguments are preallocated to fake loads, so they aren't mapped to + // storage, and indirect results are already rewritten. assert(!isa(value)); - if (auto apply = ApplySite::isa(value)) { - // Result tuples will be canonicalized during apply rewriting so the tuple - // itself is unused. - if (value->getType().is()) { - assert(apply.getSubstCalleeType()->getNumResults() > 1); - return; - } + assert(!storage.isAllocated()); + + if (getReusedStorageOperand(value)) + return; + + // Check for values that inherently project storage from their operand. 
+ if (auto *storageOper = getProjectedDefOperand(value)) { + pass.valueStorageMap.recordDefProjection(storageOper, value); + return; + } + if (value->getOwnershipKind() == OwnershipKind::Guaranteed) { + value->dump(); + llvm::report_fatal_error("^^^ guaranteed values must reuse storage"); } - // Argument loads already have a storage address. - if (storage.storageAddress) { - assert(isa(storage.storageAddress)); + // Attempt to reuse a user's storage. + if (findValueProjectionIntoUse(value)) return; + + // Eagerly create stack allocation. This way any operands can check + // alloc_stack dominance before their storage is coalesced with this + // value. Unfortunately, this alloc_stack may be dead if we later coalesce + // this value's storage with a branch use. + createStackAllocationStorage(value); +} + +/// Find a use of \p value that can provide the value's storage. +/// +/// \p incomingValues is a Range of SILValues (e.g. ArrayRef), +/// that all need \p value's storage to be available in their scope. +bool OpaqueStorageAllocation::findProjectionIntoUseImpl( + SILValue value, ArrayRef incomingValues, bool intoPhi) { + // Def-projections take precedence. + assert(!getProjectedDefOperand(value) && !getReusedStorageOperand(value)); + + for (Operand *use : value->getUses()) { + // Get the user's value, whose storage we will project into. + SILValue userValue = getProjectedUseValue(use); + if (!userValue) + continue; + + assert(!getProjectedDefOperand(userValue) + && "storage cannot project in two directions."); + + // Recurse through all storage projections to find the uniquely allocated + // storage. Enum storage cannot be reused across multiple subobjects because + // it must be initialized via a single init_enum_data_addr instruction. + // + // TODO: fix the memory verifier to consider the actual store instructions + // to initialize an enum rather than the init_enum_data_addr to reuse enum + // storage across multiple subobjects within the payload. 
+ auto *baseStorage = pass.valueStorageMap.getBaseStorage( + userValue, /*allowInitEnum*/ !intoPhi); + if (!baseStorage) + continue; + + if (auto *stackInst = + dyn_cast(baseStorage->storageAddress)) { + if (!checkStorageDominates(stackInst, incomingValues)) + continue; + } else + assert(isa(baseStorage->storageAddress)); + + LLVM_DEBUG(llvm::dbgs() << " PROJECT "; value->dump(); + llvm::dbgs() << " into use "; use->getUser()->dump()); + + pass.valueStorageMap.recordComposingUseProjection(use, userValue); + return true; } + return false; +} - if (value->hasOneUse()) { - // TODO: Handle block arguments. - // TODO: Handle subobjects with a single composition, and other non-mutating - // uses such as @in arguments. - if (auto *def = dyn_cast(value)) { - Operand *useOper = *value->use_begin(); - if (canProjectFrom(def, useOper->getUser())) { - storage.setComposedOperand(useOper); - return; - } +bool OpaqueStorageAllocation:: +checkStorageDominates(AllocStackInst *allocInst, + ArrayRef incomingValues) { + + for (SILValue incomingValue : incomingValues) { + if (auto *defInst = incomingValue->getDefiningInstruction()) { + if (!pass.domInfo->properlyDominates(allocInst, defInst)) + return false; + continue; + } + // Handle both phis and terminator results. + auto *bbArg = cast(incomingValue); + // The storage block must strictly dominate the phi. + if (!pass.domInfo->properlyDominates( + allocInst->getParent(), bbArg->getParent())) { + return false; + } + } + return true; +} + +void OpaqueStorageAllocation::allocatePhi(PhiValue phi) { + // Coalesces phi operand storage with the phi storage. The algorithm processes + // all incoming values at once, so it is run when visiting the block + // argument. + // + // The phi operand projections are computed first to give them priority. Then + // we determine if the phi itself can share storage with one of its users. 
+ CoalescedPhi coalescedPhi; + coalescedPhi.coalesce(phi, pass.valueStorageMap); + + SmallVector coalescedValues; + coalescedValues.resize(coalescedPhi.getCoalescedOperands().size()); + for (SILValue value : coalescedPhi.getCoalescedValues()) + coalescedValues.push_back(value); + + if (!findPhiProjectionIntoUse(phi, coalescedValues)) + createStackAllocationStorage(phi); + + // Regardless of whether we projected into a user or allocated storage, + // provide this storage to all the incoming values that can reuse it. + for (Operand *phiOper : coalescedPhi.getCoalescedOperands()) { + removeAllocation(phiOper->get()); + pass.valueStorageMap.recordPhiUseProjection(phiOper, + PhiOperand(phiOper).getValue()); + } +} + +// Unfortunately, we create alloc_stack instructions for SSA values before +// coalescing block arguments. This temporary storage now needs to be removed. +void OpaqueStorageAllocation::removeAllocation(SILValue value) { + auto &storage = pass.valueStorageMap.getStorage(value); + auto *allocInst = cast(storage.storageAddress); + storage.storageAddress = nullptr; + + // Its only use should be dealloc_stacks. + for (Operand *use : allocInst->getUses()) { + pass.deleter.forceDelete(cast(use->getUser())); + } + pass.deleter.forceDelete(allocInst); +} + +// Create alloc_stack that dominates an owned value \p value. Create +// jointly-postdominating dealloc_stack instructions. Nesting will be fixed +// later. +// +// Any value that may be used by a return instruction must be deallocated +// immediately before the return. This allows the return to be rewritten by +// loading from storage. +AllocStackInst *OpaqueStorageAllocation:: +createStackAllocation(SILValue value) { + assert(value.getOwnershipKind() != OwnershipKind::Guaranteed && + "creating storage for a guaranteed value implies a copy"); + +#ifndef NDEBUG + // Instructions that produce an opened type never reach here because they + // have guaranteed ownership--they project their storage. 
We reach this + // point after the opened value has been copied. + if (auto *defInst = value->getDefiningInstruction()) { + if (auto *singleValue = dyn_cast(defInst)) { + assert(!cast(defInst)->getDefinedOpenedArchetype() + && "owned open_existential is unsupported"); } } +#endif + + SILType allocTy = value->getType(); - SILBuilder allocBuilder(pass.F->begin()->begin()); - allocBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(pass.F->getModule())); - AllocStackInst *allocInstr = - allocBuilder.createAllocStack(value.getLoc(), value->getType()); + // For opened existential types, allocate stack space at the type + // definition. Allocating as early as possible provides more opportunity for + // creating use projections into value. + SILInstruction *firstOpeningInst = nullptr; + allocTy.getASTType().visit([&](CanType type) { + auto archetype = dyn_cast(type); + if (!archetype) + return; - storage.storageAddress = allocInstr; + if (auto openedTy = getOpenedArchetypeOf(archetype)) { + auto openingVal = + pass.getModule()->getRootOpenedArchetypeDef(openedTy, pass.function); - // Insert stack deallocations. - for (TermInst *termInst : pass.returnInsts) { - SILBuilder deallocBuilder(termInst); - deallocBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(pass.F->getModule())); - deallocBuilder.createDeallocStack(allocInstr->getLoc(), allocInstr); + auto *openingInst = openingVal->getDefiningInstruction(); + assert(openingVal && "all opened archetypes should be resolved"); + if (firstOpeningInst + && pass.domInfo->dominates(firstOpeningInst, openingInst)) { + return; + } + firstOpeningInst = openingInst; + } + }); + auto allocPt = firstOpeningInst ? 
std::next(firstOpeningInst->getIterator()) + : pass.function->begin()->begin(); + auto allocBuilder = pass.getBuilder(allocPt); + AllocStackInst *alloc = allocBuilder.createAllocStack(pass.genLoc(), allocTy); + + auto dealloc = [&](SILBasicBlock::iterator insertPt) { + auto deallocBuilder = pass.getBuilder(insertPt); + deallocBuilder.createDeallocStack(pass.genLoc(), alloc); + }; + if (firstOpeningInst) { + // Deallocate at the dominance frontier to ensure that allocation encloses + // not only the uses of the current value, but also of any values reusing + // this storage as a use projection. + SmallVector frontier; + computeDominanceFrontier(alloc->getParent(), pass.domInfo, frontier); + for (SILBasicBlock *deallocBlock : frontier) { + dealloc(deallocBlock->getTerminator()->getIterator()); + } + } else { + for (SILInstruction *deallocPoint : pass.exitingInsts) { + dealloc(deallocPoint->getIterator()); + } } + return alloc; } //===----------------------------------------------------------------------===// -// AddressMaterialization - materialize storage addresses, generate projections. +// AddressMaterialization +// +// Materialize storage addresses, generate projections. //===----------------------------------------------------------------------===// namespace { /// Materialize the address of a value's storage. For values that are directly -/// mapped to a storage location, simply return the mapped `AllocStackInst`. -/// For subobjects emit any necessary `_addr` projections using the provided +/// mapped to a storage location, return the mapped `AllocStackInst`. For +/// subobjects emit any necessary `_addr` projections using the provided /// `SILBuilder`. /// -/// This is a common utility for ApplyRewriter, AddressOnlyDefRewriter, -/// and AddressOnlyUseRewriter. +/// This is a common utility for PhiRewriter, CallArgRewriter, ApplyRewriter, +/// ReturnRewriter, UseRewriter, and DefRewriter. 
class AddressMaterialization { AddressLoweringState &pass; SILBuilder &B; @@ -596,78 +1190,233 @@ class AddressMaterialization { AddressMaterialization(AddressLoweringState &pass, SILBuilder &B) : pass(pass), B(B) {} - SILValue initializeOperandMem(Operand *operand); + /// Return the address of the storage for `origValue`. This may involve + /// materializing projections. Record the materialized address as storage for + /// origValue. Called once at the definition of \p origValue. + SILValue materializeAddress(SILValue origValue) { + ValueStorage &storage = pass.valueStorageMap.getStorage(origValue); + if (storage.storageAddress) + return storage.storageAddress; + + if (storage.isUseProjection) { + materializeUseProjectionStorage(storage, /*intoPhiOperand*/ false); + } else { + assert(storage.isDefProjection); + storage.storageAddress = materializeDefProjection(origValue); + } + return storage.storageAddress; + } + + void initializeOperand(Operand *operand); - SILValue materializeAddress(SILValue origValue); + SILValue materializeUseProjectionStorage(ValueStorage &storage, + bool intoPhiOperand); + + SILValue materializeDefProjection(SILValue origValue); protected: - SILValue materializeProjection(Operand *operand); + SILValue materializeStructExtract(SILInstruction *extractInst, + SILValue elementValue, unsigned fieldIdx); + + SILValue materializeTupleExtract(SILInstruction *extractInst, + SILValue elementValue, unsigned fieldIdx); + + SILValue materializeProjectionIntoUse(Operand *operand, bool intoPhiOperand); + + SILValue materializeComposingUser(SingleValueInstruction *user, + bool intoPhiOperand) { + return materializeUseProjectionStorage( + pass.valueStorageMap.getStorage(user), intoPhiOperand); + } }; } // anonymous namespace -// Materialize an address pointing to initialized memory for this operand, -// generating a projection and copy if needed. 
-SILValue AddressMaterialization::initializeOperandMem(Operand *operand) { +/// Given the operand of an aggregate instruction (struct, tuple, enum), ensure +/// that the in-memory subobject is initialized. Generates an address +/// projection and copy if needed. +/// +/// If the operand projects into its use, then the memory was already +/// initialized when visiting the use. +void AddressMaterialization::initializeOperand(Operand *operand) { SILValue def = operand->get(); - SILValue destAddr; - if (operand->get()->getType().isAddressOnly(*pass.F)) { + if (def->getType().isAddressOnly(*pass.function)) { ValueStorage &storage = pass.valueStorageMap.getStorage(def); - // Source value should already be rewritten. - assert(storage.isRewritten()); - if (storage.isProjection()) - destAddr = storage.storageAddress; - else { - destAddr = materializeProjection(operand); - B.createCopyAddr(operand->getUser()->getLoc(), storage.storageAddress, - destAddr, IsTake, IsInitialization); - } - } else { - destAddr = materializeProjection(operand); - B.createStore(operand->getUser()->getLoc(), operand->get(), destAddr, - StoreOwnershipQualifier::Unqualified); + assert(storage.isRewritten && "Source value should be rewritten"); + + if (storage.isUseProjection) + return; + + auto destAddr = + materializeProjectionIntoUse(operand, /*intoPhiOperand*/ false); + B.createCopyAddr(operand->getUser()->getLoc(), storage.storageAddress, + destAddr, IsTake, IsInitialization); + return; } - return destAddr; + SILValue destAddr = materializeProjectionIntoUse(operand, + /*intoPhiOperand*/ false); + B.createTrivialStoreOr(operand->getUser()->getLoc(), operand->get(), destAddr, + StoreOwnershipQualifier::Init); } -/// Return the address of the storage for `origValue`. This may involve -/// materializing projections. 
-SILValue AddressMaterialization::materializeAddress(SILValue origValue) { - ValueStorage &storage = pass.valueStorageMap.getStorage(origValue); - - if (!storage.storageAddress) - storage.storageAddress = - materializeProjection(storage.getComposedOperand()); +// Recursively materialize the address for storage at the point that a use +// projects into it via either a composing-use (struct, tuple, enum) or phi +// projection. This only materializes the address that the operands project +// into. It does not materialize the storage for the result. e.g. it +// materializes init_enum_data_addr, not inject_enum_addr. +// +// If \p intoPhiOperand is true, this materializes the address in the path that +// reaches a phi operand, not the phi block itself. +// +// If \p intoPhiOperand is false, then the materialized address is guaranteed to +// dominate the composing user. Map the user onto this address to avoid +// rematerialization. +SILValue AddressMaterialization::materializeUseProjectionStorage( + ValueStorage &storage, bool intoPhiOperand = false) { + // If this storage is already materialized, then simply return its + // address. This not only avoids redundant projections, but is necessary for + // correctness when emitting init_enum_data_addr. + if (!intoPhiOperand && storage.storageAddress) + return storage.storageAddress; + auto recordAddress = [&](SILValue address) { + if (!intoPhiOperand) + storage.storageAddress = address; + return address; + }; + if (storage.isComposingUseProjection()) { + // Handle chains of composing users. 
+ auto &useStorage = pass.valueStorageMap.getProjectedStorage(storage); + SILValue useVal = useStorage.value; + if (auto *defInst = useVal->getDefiningInstruction()) { + Operand *useOper = + &defInst->getAllOperands()[storage.projectedOperandNum]; + return recordAddress( + materializeProjectionIntoUse(useOper, intoPhiOperand)); + } + // For indirect function results, projectedOperandNum is the index into + // the tuple of opaque results, which isn't useful here. + assert(isa(useVal) && useStorage.storage.isRewritten); + return recordAddress(useStorage.storage.storageAddress); + } + if (storage.isPhiProjection()) { + return recordAddress(materializeUseProjectionStorage( + pass.valueStorageMap.getProjectedStorage(storage).storage, + /*intoPhiOperand*/ true)); + } + assert(!storage.isProjection() + && "a composing user may not also be a def projection"); return storage.storageAddress; } -SILValue AddressMaterialization::materializeProjection(Operand *operand) { - SILInstruction *user = operand->getUser(); +/// Materialize the address of a subobject. +/// +/// \param origValue is a value associated with the subobject storage. It is +/// either a SingleValueInstruction projection or a terminator result. 
+SILValue AddressMaterialization::materializeDefProjection(SILValue origValue) { + switch (origValue->getKind()) { + default: + llvm_unreachable("Unexpected projection from def."); + + case ValueKind::CopyValueInst: + assert(isStoreCopy(origValue)); + return pass.getMaterializedAddress( + cast(origValue)->getOperand()); + + case ValueKind::MultipleValueInstructionResult: { + auto *result = cast(origValue); + SILInstruction *destructure = result->getParent(); + switch (destructure->getKind()) { + default: + llvm_unreachable("Unexpected projection from def."); + + case SILInstructionKind::DestructureStructInst: { + return materializeStructExtract(destructure, origValue, + result->getIndex()); + break; + } + case SILInstructionKind::DestructureTupleInst: { + return materializeTupleExtract(destructure, origValue, + result->getIndex()); + break; + } + } + } + case ValueKind::StructExtractInst: { + auto *extractInst = cast(origValue); + return materializeStructExtract(extractInst, origValue, + extractInst->getFieldIndex()); + } + case ValueKind::TupleExtractInst: { + auto *extractInst = cast(origValue); + return materializeTupleExtract(extractInst, origValue, + extractInst->getFieldIndex()); + } + case ValueKind::SILPhiArgument: { + // Handle this in the caller. unchecked_take_enum_data_addr is + // destructive. It cannot be materialized on demand. + llvm_unreachable("Unimplemented switch_enum optimization"); + } + } +} +// \p structInst is a unary instruction whose first operand is a struct. 
+SILValue AddressMaterialization::materializeStructExtract( + SILInstruction *extractInst, SILValue elementValue, unsigned fieldIdx) { + auto structVal = extractInst->getOperand(0); + SILValue srcAddr = pass.getMaterializedAddress(structVal); + auto *structType = structVal->getType().getStructOrBoundGenericStruct(); + auto *varDecl = structType->getStoredProperties()[fieldIdx]; + return B.createStructElementAddr(extractInst->getLoc(), srcAddr, varDecl, + elementValue->getType().getAddressType()); +} + +// \p tupleInst is a unary instruction whose first operand is a tuple. +SILValue AddressMaterialization::materializeTupleExtract( + SILInstruction *extractInst, SILValue elementValue, unsigned fieldIdx) { + SILValue srcAddr = pass.getMaterializedAddress(extractInst->getOperand(0)); + return B.createTupleElementAddr(extractInst->getLoc(), srcAddr, fieldIdx, + elementValue->getType().getAddressType()); +} + +/// Recursively materialize the address of a subobject that is a member of the +/// operand's user. The operand's user must be an aggregate struct, tuple, enum, +/// init_existential_value. 
+SILValue +AddressMaterialization::materializeProjectionIntoUse(Operand *operand, + bool intoPhiOperand) { + SILInstruction *user = operand->getUser(); switch (user->getKind()) { default: LLVM_DEBUG(user->dump()); - llvm_unreachable("Unexpected subobject composition."); + llvm_unreachable("Unexpected projection from use."); case SILInstructionKind::EnumInst: { auto *enumInst = cast(user); - SILValue enumAddr = materializeAddress(enumInst); + SILValue enumAddr = materializeComposingUser(enumInst, intoPhiOperand); return B.createInitEnumDataAddr(enumInst->getLoc(), enumAddr, enumInst->getElement(), operand->get()->getType().getAddressType()); } case SILInstructionKind::InitExistentialValueInst: { auto *initExistentialValue = cast(user); - SILValue containerAddr = materializeAddress(initExistentialValue); + SILValue containerAddr = + materializeComposingUser(initExistentialValue, intoPhiOperand); auto canTy = initExistentialValue->getFormalConcreteType(); auto opaque = Lowering::AbstractionPattern::getOpaque(); - auto &concreteTL = pass.F->getTypeLowering(opaque, canTy); + auto &concreteTL = pass.function->getTypeLowering(opaque, canTy); return B.createInitExistentialAddr( initExistentialValue->getLoc(), containerAddr, canTy, concreteTL.getLoweredType(), initExistentialValue->getConformances()); } - case SILInstructionKind::ReturnInst: { - assert(pass.loweredFnConv.hasIndirectSILResults()); - return pass.F->getArguments()[0]; + case SILInstructionKind::StructInst: { + auto *structInst = cast(user); + + auto fieldIter = structInst->getStructDecl()->getStoredProperties().begin(); + std::advance(fieldIter, operand->getOperandNumber()); + + SILValue structAddr = materializeComposingUser(structInst, intoPhiOperand); + return B.createStructElementAddr( + structInst->getLoc(), structAddr, *fieldIter, + operand->get()->getType().getAddressType()); } case SILInstructionKind::TupleInst: { auto *tupleInst = cast(user); @@ -678,854 +1427,1749 @@ SILValue 
AddressMaterialization::materializeProjection(Operand *operand) { assert(resultIdx < pass.loweredFnConv.getNumIndirectSILResults()); // Cannot call getIndirectSILResults here because that API uses the // original function type. - return pass.F->getArguments()[resultIdx]; + return pass.function->getArguments()[resultIdx]; } - // TODO: emit tuple_element_addr - llvm_unreachable("Unimplemented"); + SILValue tupleAddr = materializeComposingUser(tupleInst, intoPhiOperand); + return B.createTupleElementAddr(tupleInst->getLoc(), tupleAddr, + operand->getOperandNumber(), + operand->get()->getType().getAddressType()); } } } //===----------------------------------------------------------------------===// -// ApplyRewriter - rewrite call sites with indirect arguments. +// PhiRewriter +// +// Insert copies on CFG edges to break phi operand interferences. //===----------------------------------------------------------------------===// namespace { -/// Rewrite an Apply, lowering its indirect SIL arguments. -/// -/// Replace indirect parameter arguments of this function with address-type -/// arguments. -/// -/// Insert new indirect result arguments for this function to represent the -/// caller's storage. -class ApplyRewriter { + +// To materialize a phi operand in the corresponding phi predecessor block: +// +// 1. Materialize the phi address. If the phi projects into a use, this requires +// initialization of the user's storage in each predecessor. +// +// 2. If the phi operand is not coalesced, then copy the operand into the +// materialized phi address. +// +// For blocks with multiple phis, all copies of phi operands semantically occur +// in parallel on the CFG edge from the predecessor to the phi block. As these +// copies are inserted into the predecessor's intruction list, maintain the +// illusion of parallel copies by resolving any interference between the phi +// copies. This is done by checking for anti-dependencies to or from other phi +// copies. 
If one phi copy's source reads from another phi copy's dest, then the +// read must occur before the write. +// +// Insert a second copy to break an anti-dependence cycle when both the source +// and destination of the new phi interferes with other phis (the classic +// phi-swap problem). +// +// Input: +// addr0 = alloc_stack // storage for val0 +// addr1 = alloc_stack // storage for val1 +// bb1: +// br bb3(val0, val1) +// bb2: +// br bb3(val1, val0) +// bb3(phi0, phi1): +// +// Output: +// +// bb1: +// br bb3(val0, val1) +// bb2: +// temp = alloc_stack +// copy_addr addr0 to temp +// copy_addr addr1 to addr0 +// copy_addr temp to addr1 +// dealloc_stack temp +// br bb3(val1, val1) +// bb3(phi0, phi1): +class PhiRewriter { AddressLoweringState &pass; - ApplySite apply; - SILBuilder argBuilder; - /// For now, we assume that the apply site is a normal apply. - ApplyInst *getApplyInst() const { return cast(apply); } + // A set of copies from a phi operand storage to phi storage. These logically + // occur on the CFG edge. Keep track of them to resolve anti-dependencies. 
+ SmallPtrSet phiCopies; public: - ApplyRewriter(ApplySite origCall, AddressLoweringState &pass) - : pass(pass), apply(origCall), argBuilder(origCall.getInstruction()) { - argBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(origCall.getModule())); - } + PhiRewriter(AddressLoweringState &pass) : pass(pass) {} - void rewriteParameters(); - void rewriteIndirectParameter(Operand *operand); - - void convertApplyWithIndirectResults(); + void materializeOperand(PhiOperand phiOperand); protected: - void - canonicalizeResults(MutableArrayRef directResultValues, - ArrayRef nonCanonicalUses); - SILValue materializeIndirectResultAddress( - SingleValueInstruction *origDirectResultVal, - SILType argTy); + PhiRewriter(const PhiRewriter &) = delete; + PhiRewriter &operator=(const PhiRewriter &) = delete; + + CopyAddrInst *createPhiCopy(SILBuilder &builder, SILValue from, SILValue to) { + auto *copy = builder.createCopyAddr(pass.genLoc(), from, to, IsTake, + IsInitialization); + phiCopies.insert(copy); + return copy; + } + + struct CopyPosition { + SILBasicBlock::iterator latestCopyPos; + bool foundAntiDependenceCycle = false; + }; + CopyPosition findPhiCopyPosition(PhiOperand phiOper); }; -} // end anonymous namespace +} // anonymous namespace -/// Rewrite any indirect parameter in place. -void ApplyRewriter::rewriteParameters() { - // Rewrite all incoming indirect operands. 
- unsigned calleeArgIdx = apply.getCalleeArgIndexOfFirstAppliedArg(); - for (Operand &operand : apply.getArgumentOperands()) { - if (operand.get()->getType().isObject()) { - auto argConv = - apply.getSubstCalleeConv().getSILArgumentConvention(calleeArgIdx); - if (argConv.isIndirectConvention()) - rewriteIndirectParameter(&operand); +void PhiRewriter::materializeOperand(PhiOperand phiOper) { + auto &operStorage = + pass.valueStorageMap.getStorage(phiOper.getOperand()->get()); + if (operStorage.isPhiProjection()) { + if (operStorage.projectedStorageID + == pass.valueStorageMap.getOrdinal(phiOper.getValue())) { + // This operand was coalesced with this particular phi. No copy needed. + return; } - ++calleeArgIdx; } + auto phiOperAddress = operStorage.getMaterializedAddress(); + + auto copyPos = findPhiCopyPosition(phiOper); + + auto builder = pass.getBuilder(copyPos.latestCopyPos); + AddressMaterialization addrMat(pass, builder); + + auto &phiStorage = pass.valueStorageMap.getStorage(phiOper.getValue()); + SILValue phiAddress = + addrMat.materializeUseProjectionStorage(phiStorage, + /*intoPhiOperand*/ true); + + if (!copyPos.foundAntiDependenceCycle) { + createPhiCopy(builder, phiOperAddress, phiAddress); + return; + } + AllocStackInst *alloc = + builder.createAllocStack(pass.genLoc(), phiOper.getValue()->getType()); + createPhiCopy(builder, phiOperAddress, alloc); + + auto tempBuilder = pass.getBuilder(phiOper.getBranch()->getIterator()); + createPhiCopy(tempBuilder, alloc, phiAddress); + tempBuilder.createDeallocStack(pass.genLoc(), alloc); } -/// Deallocate temporary call-site stack storage. -/// -/// `argLoad` is non-null for @out args that are loaded. -static void insertStackDeallocationAtCall(AllocStackInst *allocInst, - SILInstruction *applyInst, - SILInstruction *argLoad) { - SILInstruction *lastUse = argLoad ? 
argLoad : applyInst; - - switch (applyInst->getKind()) { - case SILInstructionKind::ApplyInst: { - SILBuilder deallocBuilder(&*std::next(lastUse->getIterator())); - deallocBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(applyInst->getModule())); - deallocBuilder.createDeallocStack(allocInst->getLoc(), allocInst); - break; +PhiRewriter &AddressLoweringState::getPhiRewriter() { + if (!this->phiRewriter) { + this->phiRewriter = std::make_unique(*this); } - case SILInstructionKind::TryApplyInst: - // TODO!!!: insert dealloc in the catch block. - llvm_unreachable("not implemented for this instruction!"); - case SILInstructionKind::PartialApplyInst: - llvm_unreachable("partial apply cannot have indirect results."); - default: - llvm_unreachable("not implemented for this instruction!"); + return *(this->phiRewriter.get()); +} + +// Return the latest position at which a copy into this phi may be emitted +// without violating an anti-dependence on another phi copy. +PhiRewriter::CopyPosition PhiRewriter::findPhiCopyPosition(PhiOperand phiOper) { + auto phiBaseAddress = + pass.valueStorageMap.getBaseStorage(phiOper.getValue()).storageAddress; + + auto operBaseAddress = + pass.valueStorageMap.getBaseStorage(phiOper.getOperand()->get()) + .storageAddress; + + auto insertPt = phiOper.getBranch()->getIterator(); + bool foundEarliestInsertPoint = false; + + CopyPosition copyPos; + copyPos.latestCopyPos = insertPt; + + // Continue scanning until all phi copies have been checked for interference. + for (auto beginIter = phiOper.predBlock->begin(); insertPt != beginIter;) { + --insertPt; + + auto *phiCopy = dyn_cast(&*insertPt); + if (!phiCopy || !phiCopies.contains(phiCopy)) + break; + + if (!foundEarliestInsertPoint + && getAccessBase(phiCopy->getSrc()) == phiBaseAddress) { + // Anti-dependence from the phi copy to the phi value. Do not copy into + // the phi storage before this point. 
+ foundEarliestInsertPoint = true; + } + if (getAccessBase(phiCopy->getDest()) == operBaseAddress) { + // Anti-dependence from the phi operand to the phi copy. Do not copy out + // of the operand storage after this point. + copyPos.latestCopyPos = insertPt; + // If the earliest and latest points conflict, allocate a temporary. + if (foundEarliestInsertPoint) { + copyPos.foundAntiDependenceCycle = true; + } + } } + return copyPos; } -/// Rewrite a formally indirect parameter in place. +//===----------------------------------------------------------------------===// +// CallArgRewriter +// +// Rewrite call arguments for indirect parameters. +//===----------------------------------------------------------------------===// + +namespace { +/// This rewrites one parameter at a time, replacing the incoming +/// object arguments with address-type arguments. +class CallArgRewriter { + AddressLoweringState &pass; + FullApplySite apply; + SILLocation callLoc; + SILBuilder argBuilder; + AddressMaterialization addrMat; + +public: + CallArgRewriter(FullApplySite apply, AddressLoweringState &pass) + : pass(pass), apply(apply), callLoc(apply.getLoc()), + argBuilder(pass.getBuilder(apply.getInstruction()->getIterator())), + addrMat(pass, argBuilder) {} + + bool rewriteArguments(); + + void rewriteIndirectArgument(Operand *operand); +}; +} // end anonymous namespace + +/// Rewrite all incoming indirect arguments in place without modifying the call. +bool CallArgRewriter::rewriteArguments() { + bool changed = false; + + auto origConv = apply.getSubstCalleeConv(); + assert(apply.getNumArguments() == origConv.getNumParameters() + && "results should not yet be rewritten"); + + for (unsigned argIdx = apply.getCalleeArgIndexOfFirstAppliedArg(), + endArgIdx = argIdx + apply.getNumArguments(); + argIdx < endArgIdx; ++argIdx) { + + Operand &operand = apply.getArgumentRef(argIdx); + // Ignore arguments that have already been rewritten with an address. 
+ if (operand.get()->getType().isAddress()) + continue; + + auto argConv = apply.getSubstCalleeConv().getSILArgumentConvention(argIdx); + if (argConv.isIndirectConvention()) { + rewriteIndirectArgument(&operand); + changed |= true; + } + } + return changed; +} + +/// Rewrite a formally indirect argument in place. /// Update the operand to the incoming value's storage address. /// After this, the SIL argument types no longer match SIL function conventions. /// /// Temporary argument storage may be created for loadable values. -/// -/// Note: Temporary argument storage does not own its value. If the argument -/// is owned, the stored value should already have been copied. -void ApplyRewriter::rewriteIndirectParameter(Operand *operand) { +void CallArgRewriter::rewriteIndirectArgument(Operand *operand) { SILValue argValue = operand->get(); - if (argValue->getType().isAddressOnly(*pass.F)) { + if (argValue->getType().isAddressOnly(*pass.function)) { ValueStorage &storage = pass.valueStorageMap.getStorage(argValue); - // Source value should already be rewritten. - assert(storage.isRewritten()); + assert(storage.isRewritten && "arg source should be rewritten"); operand->set(storage.storageAddress); return; } // Allocate temporary storage for a loadable operand. - AllocStackInst *allocInstr = - argBuilder.createAllocStack(apply.getLoc(), argValue->getType()); - - argBuilder.createStore(apply.getLoc(), argValue, allocInstr, - StoreOwnershipQualifier::Unqualified); - - operand->set(allocInstr); - - insertStackDeallocationAtCall(allocInstr, apply.getInstruction(), - /*argLoad=*/nullptr); -} - -// Canonicalize call result uses. Treat each result of a multi-result call as -// an independent value. Currently, SILGen may generate tuple_extract for each -// result but generate a single destroy_value for the entire tuple of -// results. This makes it impossible to reason about each call result as an -// independent value according to the callee's function type. 
-// -// directResultValues has an entry for each tuple extract corresponding to -// that result if one exists. This function will add an entry to -// directResultValues whenever it needs to materialize a TupleExtractInst. -void ApplyRewriter::canonicalizeResults( - MutableArrayRef directResultValues, - ArrayRef nonCanonicalUses) { - - auto *applyInst = getApplyInst(); - - for (Operand *operand : nonCanonicalUses) { - auto *destroyInst = dyn_cast(operand->getUser()); - if (!destroyInst) - llvm::report_fatal_error("Simultaneous use of multiple call results."); - - for (unsigned resultIdx : indices(directResultValues)) { - SingleValueInstruction *result = directResultValues[resultIdx]; - if (!result) { - SILBuilder resultBuilder(std::next(SILBasicBlock::iterator(applyInst))); - resultBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(applyInst->getModule())); - result = resultBuilder.createTupleExtract(applyInst->getLoc(), - applyInst, resultIdx); - directResultValues[resultIdx] = result; + AllocStackInst *allocInst = + argBuilder.createAllocStack(callLoc, argValue->getType()); + + operand->set(allocInst); + + if (apply.getArgumentConvention(*operand).isOwnedConvention()) { + argBuilder.createTrivialStoreOr(apply.getLoc(), argValue, allocInst, + StoreOwnershipQualifier::Init); + cleanupAfterCall(apply, [&](SILBasicBlock::iterator insertPt) { + auto deallocBuilder = pass.getBuilder(insertPt); + deallocBuilder.createDeallocStack(callLoc, allocInst); + }); + } else { + auto borrow = argBuilder.emitBeginBorrowOperation(callLoc, argValue); + auto *storeInst = + argBuilder.emitStoreBorrowOperation(callLoc, borrow, allocInst); + + cleanupAfterCall(apply, [&](SILBasicBlock::iterator insertPt) { + auto cleanupBuilder = pass.getBuilder(insertPt); + if (auto *storeBorrow = dyn_cast(storeInst)) { + cleanupBuilder.emitEndBorrowOperation(callLoc, storeBorrow); } - SILBuilder B(destroyInst); - 
B.setSILConventions(SILModuleConventions::getLoweredAddressConventions(applyInst->getModule())); - auto &TL = pass.F->getTypeLowering(result->getType()); - TL.emitDestroyValue(B, destroyInst->getLoc(), result); + cleanupBuilder.emitEndBorrowOperation(callLoc, borrow); + cleanupBuilder.createDeallocStack(callLoc, allocInst); + }); + } +} + +//===----------------------------------------------------------------------===// +// ApplyRewriter +// +// Rewrite call sites with indirect results. +// ===---------------------------------------------------------------------===// + +namespace { +/// Once any result needs to be rewritten, then the entire apply is +/// replaced. Creates new indirect result arguments for this function to +/// represent the caller's storage. +/// +/// TODO: Multi-Result - this is complicated because calls are not properly +/// represented as multi-value instructions. +class ApplyRewriter { + AddressLoweringState &pass; + + // This apply site mutates when the new apply instruction is generated. + FullApplySite apply; + SILLocation callLoc; + + // For building incoming arguments and materializing addresses. + SILBuilder argBuilder; + + // For loading results. 
+ SILBuilder resultBuilder; + + AddressMaterialization addrMat; + SILFunctionConventions opaqueCalleeConv; + SILFunctionConventions loweredCalleeConv; + +public: + ApplyRewriter(FullApplySite oldCall, AddressLoweringState &pass) + : pass(pass), apply(oldCall), callLoc(oldCall.getLoc()), + argBuilder(pass.getBuilder(oldCall.getInstruction()->getIterator())), + resultBuilder(pass.getBuilder(getCallResultInsertionPoint())), + addrMat(pass, argBuilder), + opaqueCalleeConv(oldCall.getSubstCalleeConv()), + loweredCalleeConv(getLoweredCallConv(oldCall)) {} + + void convertApplyWithIndirectResults(); + +protected: + SILBasicBlock::iterator getCallResultInsertionPoint() { + if (isa(apply)) + return std::next(SILBasicBlock::iterator(apply.getInstruction())); + + auto *bb = cast(apply)->getNormalBB(); + return bb->begin(); + } + + void makeIndirectArgs(MutableArrayRef newCallArgs); + + SILBasicBlock::iterator getResultInsertionPoint(); + + SILValue materializeIndirectResultAddress(SILValue oldResult, SILType argTy); + + void rewriteApply(ArrayRef newCallArgs); + + void rewriteTryApply(ArrayRef newCallArgs); + + void replaceDirectResults(DestructureTupleInst *oldDestructure); +}; +} // end anonymous namespace + +/// Top-level entry: Allocate storage for formally indirect results at a call +/// site. Create a new apply instruction with indirect SIL arguments. The +/// original apply instruction remains in place, unless it is a try_apply. +/// +/// Input (T = address-only, L=Loadable): +/// +/// %addr = alloc_stack $T // storage for %oldResult +/// ... +/// %oldResult = apply : $() -> @out T +/// +/// Output: +/// +/// %addr = alloc_stack $T // storage for %oldResult +/// ... 
+/// %newCall = apply(%addr) : $() -> @out T // no uses +/// %oldResult = apply() : $() -> @out T // original apply +/// +/// Input: +/// +/// %result = apply : $() -> @out L +/// +/// Output: +/// +/// %addr = alloc_stack $L // unmapped temp storage +/// %newCall = apply(%addr) : $() -> @out L // no uses +/// %oldCall = apply() : $() -> @out L // original apply, no uses +/// %result = load %addr : $*L +/// dealloc_stack %addr +/// +/// Input: +/// +/// %addr0 = alloc_stack $T // storage for %result0 +/// ... +/// %tuple = apply : $() -> (@out T, @out L, L) +/// (%r0, %r1, %r2) = destructure_tuple %tuple : $(T, T, T) +/// +/// Output: +/// +/// %addr0 = alloc_stack $T // storage for %r0 +/// ... +/// %addr1 = alloc_stack // unmapped temp storage +/// %r2 = apply(%addr0, %addr1) : $() -> (@out T, @out L, L) +/// %oldCall = apply() : $() -> (@out T, @out L, L) +/// %r1 = load %addr1 : $*L +/// (%r0, %d1, %d2) = destructure_tuple %tuple : $(T, T, T) +/// // no uses of %d1, %d2 +/// +void ApplyRewriter::convertApplyWithIndirectResults() { + // Gather information from the old apply before rewriting it and mutating + // this->apply. + + // Avoid revisiting this apply. + bool erased = pass.indirectApplies.erase(apply); + assert(erased && "all results should be rewritten at the same time"); + (void)erased; + + // List of new call arguments. + SmallVector newCallArgs(loweredCalleeConv.getNumSILArguments()); + + // Materialize and map the address of each opaque indirect result, possibly + // creating alloc_stacks. + // + // Create a load for each loadable indirect result. + // + // Populate newCallArgs. + makeIndirectArgs(newCallArgs); + + // Record the original results before potentially removing the apply + // (try_apply is removed during rewriting). + auto *destructure = getCallMultiResult(apply.getPseudoResult()); + + switch (apply.getKind()) { + case FullApplySiteKind::ApplyInst: { + // this->apply will be updated with the new apply instruction. 
+    rewriteApply(newCallArgs);
+    break;
+  }
+  case FullApplySiteKind::TryApplyInst: {
+    // this->apply will be updated with the new try_apply instruction.
+    rewriteTryApply(newCallArgs);
+    break;
+  }
+  case FullApplySiteKind::BeginApplyInst:
+    // BeginApply does not need to be rewritten. Its argument list is not
+    // polluted with indirect results.
+    break;
+  };
+
+  // Replace all results of the original call that remain direct. ApplyRewriter
+  // is only used when at least one result is indirect. So any direct results
+  // require a destructure.
+  if (destructure) {
+    replaceDirectResults(destructure);
+  }
+}
+
+// Populate \p newCallArgs with the new call instruction's SIL argument list.
+// Materialize temporary storage for loadable indirect results.
+//
+// Input (T = address-only, L=Loadable):
+//
+//   %addr = alloc_stack $T // storage for %oldResult
+//   ...
+//   %oldResult = apply : $() -> @out T
+//
+// Output (newCallArgs = [%addr]):
+//
+// Input:
+//
+//   %result = apply : $() -> @out L
+//
+// Output (newCallArgs = [%addr]):
+//
+//   %addr = alloc_stack $L // unmapped temp storage
+//   %oldCall = apply() : $() -> @out L // no uses
+//   %result = load %addr : $*L
+//   dealloc_stack %addr
+//
+// Input:
+//
+//   %addr0 = alloc_stack $T // storage for %r0
+//   ...
+//   %tuple = apply : $() -> (@out T, @out L, L)
+//   (%r0, %r1, %r2) = destructure_tuple %tuple : $(T, L, L)
+//
+// Output (newCallArgs = [%addr0, %addr1]):
+//
+//   %addr0 = alloc_stack $T // storage for %r0
+//   ...
+//   %addr1 = alloc_stack // unmapped temp storage
+//   %tuple = apply() : $() -> (@out T, @out L, L)
+//   %r1 = load %addr1 : $*L
+//   dealloc_stack %addr1
+//   (%r0, %d1, %r2) = destructure_tuple %tuple : $(T, L, L)
+//   // no uses of %d1
+//
+void ApplyRewriter::makeIndirectArgs(MutableArrayRef newCallArgs) {
+
+  auto typeCtx = pass.function->getTypeExpansionContext();
+
+  // The index of the next indirect result argument.
+ unsigned newResultArgIdx = + loweredCalleeConv.getSILArgIndexOfFirstIndirectResult(); + + auto visitCallResult = [&](SILValue result, SILResultInfo resultInfo) { + assert(!opaqueCalleeConv.isSILIndirect(resultInfo) + && "canonical call results are always direct"); + + if (loweredCalleeConv.isSILIndirect(resultInfo)) { + SILValue indirectResultAddr = materializeIndirectResultAddress( + result, loweredCalleeConv.getSILType(resultInfo, typeCtx)); + // Record the new indirect call argument. + newCallArgs[newResultArgIdx++] = indirectResultAddr; } - destroyInst->eraseFromParent(); + return true; + }; + visitCallResults(apply, visitCallResult); + + // Append the existing call arguments to the SIL argument list. They were + // already lowered to addresses by CallArgRewriter. + assert(newResultArgIdx == loweredCalleeConv.getSILArgIndexOfFirstParam()); + unsigned origArgIdx = apply.getSubstCalleeConv().getSILArgIndexOfFirstParam(); + for (unsigned endIdx = newCallArgs.size(); newResultArgIdx < endIdx; + ++newResultArgIdx, ++origArgIdx) { + newCallArgs[newResultArgIdx] = apply.getArgument(origArgIdx); + } +} + +SILBasicBlock::iterator ApplyRewriter::getResultInsertionPoint() { + switch (apply.getKind()) { + case FullApplySiteKind::ApplyInst: { + return std::next(apply.getInstruction()->getIterator()); + } + case FullApplySiteKind::TryApplyInst: { + auto *tryApply = cast(apply.getInstruction()); + return tryApply->getNormalBB()->begin(); + } + case FullApplySiteKind::BeginApplyInst: { + llvm_unreachable("coroutines don't have indirect results"); + } } } /// Return the storage address for the indirect result corresponding to the -/// given original result value. Allocate temporary argument storage for any -/// indirect results that are unmapped because they are loadable or unused. +/// \p oldResult. Allocate temporary argument storage for an +/// indirect result that isn't mapped to storage because it is either loadable +/// or unused. 
 ///
-/// origDirectResultVal may be nullptr for unused results.
-SILValue ApplyRewriter::materializeIndirectResultAddress(
-    SingleValueInstruction *origDirectResultVal, SILType argTy) {
-
-  if (origDirectResultVal
-      && origDirectResultVal->getType().isAddressOnly(*pass.F)) {
-    auto &storage = pass.valueStorageMap.getStorage(origDirectResultVal);
+/// \p oldResult is invalid for an unused result.
+SILValue ApplyRewriter::materializeIndirectResultAddress(SILValue oldResult,
+                                                         SILType argTy) {
+  if (oldResult && oldResult->getType().isAddressOnly(*pass.function)) {
+    // Results that project into their uses have not yet been materialized.
+    addrMat.materializeAddress(oldResult);
+
+    auto &storage = pass.valueStorageMap.getStorage(oldResult);
     storage.markRewritten();
-    // Pass the local storage address as the indirect result address.
     return storage.storageAddress;
   }
   // Allocate temporary call-site storage for an unused or loadable result.
-  SILInstruction *origCallInst = apply.getInstruction();
-  SILLocation loc = origCallInst->getLoc();
-  auto *allocInst = argBuilder.createAllocStack(loc, argTy);
-  LoadInst *loadInst = nullptr;
-  if (origDirectResultVal) {
-    // TODO: Find the try_apply's result block.
-    // Build results outside-in to next stack allocations.
-    SILBuilder resultBuilder(std::next(SILBasicBlock::iterator(origCallInst)));
-    resultBuilder.setSILConventions(
-        SILModuleConventions::getLoweredAddressConventions(origCallInst->getModule()));
+  auto *allocInst = argBuilder.createAllocStack(callLoc, argTy);
+
+  // Instead of using resultBuilder, insert dealloc immediately after the call
+  // for stack discipline across loadable indirect results.
+  cleanupAfterCall(apply, [&](SILBasicBlock::iterator insertPt) {
+    auto cleanupBuilder = pass.getBuilder(insertPt);
+    cleanupBuilder.createDeallocStack(callLoc, allocInst);
+  });
+
+  if (oldResult && !oldResult->use_empty()) {
+    // Insert reloads immediately after the call.
Get the reload insertion
+    // point after emitting dealloc to ensure the reload happens first.
+    auto reloadBuilder = pass.getBuilder(getResultInsertionPoint());
+
     // This is a formally indirect argument, but is loadable.
-    loadInst = resultBuilder.createLoad(loc, allocInst,
-                                        LoadOwnershipQualifier::Unqualified);
-    origDirectResultVal->replaceAllUsesWith(loadInst);
-    pass.markDead(origDirectResultVal);
+    auto *loadInst = reloadBuilder.createTrivialLoadOr(
+        callLoc, allocInst, LoadOwnershipQualifier::Take);
+    oldResult->replaceAllUsesWith(loadInst);
   }
-  insertStackDeallocationAtCall(allocInst, origCallInst, loadInst);
   return SILValue(allocInst);
 }
 
-/// Allocate storage for formally indirect results at the given call site.
-/// Create a new call instruction with indirect SIL arguments.
-void ApplyRewriter::convertApplyWithIndirectResults() {
-  assert(apply.getSubstCalleeType()->hasIndirectFormalResults());
-
-  auto *origCallInst = getApplyInst();
-  SILFunctionConventions origFnConv = apply.getSubstCalleeConv();
-
-  // Gather the original direct return values.
-  // Canonicalize results so no user uses more than one result.
-  SmallVector origDirectResultValues(
-      origFnConv.getNumDirectSILResults());
-  SmallVector nonCanonicalUses;
-  if (origCallInst->getType().is()) {
-    for (Operand *operand : origCallInst->getUses()) {
-      if (auto *extract = dyn_cast(operand->getUser()))
-        origDirectResultValues[extract->getFieldIndex()] = extract;
-      else
-        nonCanonicalUses.push_back(operand);
-    }
-    if (!nonCanonicalUses.empty())
-      canonicalizeResults(origDirectResultValues, nonCanonicalUses);
-  } else {
-    // This call has a single, indirect result (convertApplyWithIndirectResults
-    // only handles call with at least one indirect result).
-    // An unused result can remain unmapped. Temporary storage will be allocated
-    // later when fixing up the call's uses.
- assert(origDirectResultValues.size() == 1); - if (!origCallInst->use_empty()) { - assert(pass.valueStorageMap.contains(origCallInst)); - origDirectResultValues[0] = origCallInst; - } +void ApplyRewriter::rewriteApply(ArrayRef newCallArgs) { + auto *oldCall = cast(apply.getInstruction()); + + auto *newCall = argBuilder.createApply( + callLoc, apply.getCallee(), apply.getSubstitutionMap(), newCallArgs, + oldCall->getApplyOptions(), oldCall->getSpecializationInfo()); + + this->apply = FullApplySite(newCall); + + // No need to delete this apply. It either has a single address-only result + // and will be deleted at the end of the pass. Or it has multiple results and + // will be deleted with its destructure_tuple. +} + +// Replace \p tryApply with a new try_apply using \p newCallArgs. +// +// If the old result was a single address-only value, then create and return a +// fake load that takes its place in the storage map. Otherwise, return an +// invalid SILValue. +// +// Update this->apply with the new call instruction. +// +// Input (T = address-only, L=Loadable): +// +// %addr = alloc_stack $T // storage for %oldResult +// ... +// try_apply : $() -> @out T +// bbNormal(%oldResult : $T): +// +// Output (return %oldResult - ApplyRewriter final)): +// +// %addr = alloc_stack $T // storage for %oldResult +// ... +// try_apply(%addr) : $() -> @out T +// bbNormal(%newResult : $()): +// %oldResult = load undef +// +// Input: +// +// %addr = alloc_stack $L // unmapped temp storage +// try_apply() : $() -> @out L +// bbNormal(%oldResult : $L): // no uses +// %result = load %addr : $*L +// dealloc_stack %addr +// +// Output (return invalid - ApplyRewriter final): +// +// %addr = alloc_stack $L // unmapped temp storage +// try_apply(%addr) : $() -> @out L +// bbNormal(%oldResult : $()): // no uses +// %result = load %addr : $*L +// dealloc_stack %addr +// +// Input: +// +// %addr0 = alloc_stack $T // storage for %result0 +// ... 
+// %addr1 = alloc_stack // unmapped temp storage +// try_apply() : $() -> (@out T, @out L, L) +// bbNormal(%tuple : $(T, L, L)): +// %r1 = load %addr1 : $*L +// dealloc_stack %addr1 +// (%r0, %d1, %r2) = destructure_tuple %tuple : $(T, T, T) +// // no uses of %d1 +// +// Output (return invalid): +// +// %addr0 = alloc_stack $T // storage for %result0 +// ... +// %addr1 = alloc_stack // unmapped temp storage +// try_apply(%addr0, %addr1) : $() -> (@out T, @out L, L) +// bbNormal(%newResult : $L): // no uses yet +// %r1 = load %addr1 : $*L +// dealloc_stack %addr1 +// (%r0, %d1, %r2) = destructure_tuple undef : $(T, T, T) +// // no uses of %d1 +// +void ApplyRewriter::rewriteTryApply(ArrayRef newCallArgs) { + auto typeCtx = pass.function->getTypeExpansionContext(); + auto *tryApply = cast(apply.getInstruction()); + + auto *newCallInst = argBuilder.createTryApply( + callLoc, apply.getCallee(), apply.getSubstitutionMap(), newCallArgs, + tryApply->getNormalBB(), tryApply->getErrorBB(), + tryApply->getApplyOptions(), tryApply->getSpecializationInfo()); + + auto *resultArg = cast(apply.getPseudoResult()); + + auto replaceTermResult = [&](SILValue newResultVal) { + SILType resultTy = loweredCalleeConv.getSILResultType(typeCtx); + auto ownership = resultTy.isTrivial(*pass.function) + ? OwnershipKind::None + : OwnershipKind::Owned; + + resultArg->replaceAllUsesWith(newResultVal); + assert(resultArg->getIndex() == 0); + resultArg->getParent()->replacePhiArgument(0, resultTy, ownership, + resultArg->getDecl()); + }; + // Immediately delete the old try_apply (old applies hang around until + // dead code removal because they directly define values). + pass.deleter.forceDelete(tryApply); + this->apply = FullApplySite(newCallInst); + + // Handle a single opaque result value. + if (pass.valueStorageMap.contains(resultArg)) { + assert(!resultArg->getType().is()); + + // Storage was materialized by materializeIndirectResultAddress. 
+ auto &origStorage = pass.valueStorageMap.getStorage(resultArg); + assert(origStorage.isRewritten); + (void)origStorage; + + // Rewriting try_apply with a new function type requires erasing the opaque + // block argument. Create a dummy load-copy until all uses have been + // rewritten. + LoadInst *loadArg = resultBuilder.createLoad( + callLoc, origStorage.storageAddress, LoadOwnershipQualifier::Copy); + + pass.valueStorageMap.replaceValue(resultArg, loadArg); + replaceTermResult(loadArg); + return; } + // Loadable results were loaded by materializeIndirectResultAddress. + // Temporarily redirect all uses to Undef. They will be fixed in + // replaceDirectResults(). + replaceTermResult( + SILUndef::get(resultArg->getType().getAddressType(), *pass.function)); +} - // Prepare to emit a new call instruction. - SILLocation loc = origCallInst->getLoc(); - SILBuilder callBuilder(origCallInst); - callBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(origCallInst->getModule())); - - // The new call instruction's SIL calling convention. - SILFunctionConventions loweredCalleeConv( - apply.getSubstCalleeType(), - SILModuleConventions::getLoweredAddressConventions(origCallInst->getModule())); - - // The new call instruction's SIL argument list. - SmallVector newCallArgs(loweredCalleeConv.getNumSILArguments()); - - // Map the original result indices to new result indices. - SmallVector newDirectResultIndices( - origFnConv.getNumDirectSILResults()); - // Indices used to populate newDirectResultIndices. - unsigned oldDirectResultIdx = 0, newDirectResultIdx = 0; - - // The index of the next indirect result argument. - unsigned newResultArgIdx = - loweredCalleeConv.getSILArgIndexOfFirstIndirectResult(); - - // Visit each result. Redirect results that are now indirect by calling - // materializeIndirectResultAddress. Result that remain direct will be - // redirected later. Populate newCallArgs and newDirectResultIndices. 
- for_each( - apply.getSubstCalleeType()->getResults(), - origDirectResultValues, - [&](SILResultInfo resultInfo, SingleValueInstruction *origDirectResultVal) { - // Assume that all original results are direct in SIL. - assert(!origFnConv.isSILIndirect(resultInfo)); - - if (loweredCalleeConv.isSILIndirect(resultInfo)) { - SILValue indirectResultAddr = materializeIndirectResultAddress( - origDirectResultVal, - loweredCalleeConv.getSILType( - resultInfo, callBuilder.getTypeExpansionContext())); - // Record the new indirect call argument. - newCallArgs[newResultArgIdx++] = indirectResultAddr; - // Leave a placeholder for indirect results. - newDirectResultIndices[oldDirectResultIdx++] = ~0; - } else { - // Record the new direct result, and advance the direct result indices. - newDirectResultIndices[oldDirectResultIdx++] = newDirectResultIdx++; - } - // replaceAllUses will be called later to handle direct results that - // remain direct results of the new call instruction. - }); +// Replace all formally direct results by rewriting the destructure_tuple. +// +// Input: +// +// %addr0 = alloc_stack $T // storage for %r0 +// ... +// %addr1 = alloc_stack // unmapped temp storage +// %newPseudoResult = apply(%addr0, %addr1) : $() -> (@out T, @out L, L) +// %tuple = apply() : $() -> (@out T, @out L, L) +// %r1 = load %addr1 : $*L +// dealloc_stack %addr1 +// (%r0, %d1, %r2) = destructure_tuple %tuple : $(T, T, T) +// // no uses of %d1 +// +// Output: +// +// %addr0 = alloc_stack $T // storage for %r0 +// ... 
+// %addr1 = alloc_stack // unmapped temp storage +// %r2 = apply(%addr0, %addr1) : $() -> (@out T, @out L, L) +// %tuple = apply() : $() -> (@out T, @out L, L) +// %r1 = load %addr1 : $*L +// dealloc_stack %addr1 +// (%r0, %d1, %d2) = destructure_tuple %tuple : $(T, T, T) +// // no uses of %d1, %d2 +// +void ApplyRewriter::replaceDirectResults(DestructureTupleInst *oldDestructure) { + SILValue newPseudoResult = apply.getPseudoResult(); - // Append the existing call arguments to the SIL argument list. They were - // already lowered to addresses by rewriteIncomingArgument. - assert(newResultArgIdx == loweredCalleeConv.getSILArgIndexOfFirstParam()); - unsigned origArgIdx = apply.getSubstCalleeConv().getSILArgIndexOfFirstParam(); - for (unsigned endIdx = newCallArgs.size(); newResultArgIdx < endIdx; - ++newResultArgIdx, ++origArgIdx) { - newCallArgs[newResultArgIdx] = apply.getArgument(origArgIdx); + DestructureTupleInst *newDestructure = nullptr; + if (loweredCalleeConv.getNumDirectSILResults() > 1) { + newDestructure = + resultBuilder.createDestructureTuple(callLoc, newPseudoResult); } + unsigned newDirectResultIdx = 0; + + auto visitOldCallResult = [&](SILValue result, SILResultInfo resultInfo) { + assert(!opaqueCalleeConv.isSILIndirect(resultInfo) + && "canonical call results are always direct"); - // Create a new apply with indirect result operands. - ApplyInst *newCallInst; - switch (origCallInst->getKind()) { - case SILInstructionKind::ApplyInst: - newCallInst = callBuilder.createApply( - loc, apply.getCallee(), apply.getSubstitutionMap(), newCallArgs, - cast(origCallInst)->getApplyOptions()); - break; - case SILInstructionKind::TryApplyInst: - // TODO: insert dealloc in the catch block. - llvm_unreachable("not implemented for this instruction!"); - case SILInstructionKind::PartialApplyInst: - // Partial apply does not have formally indirect results. 
- default: - llvm_unreachable("not implemented for this instruction!"); - } - - // Replace all unmapped uses of the original call with uses of the new call. - // - // TODO: handle bbargs from try_apply. - SILBuilder resultBuilder( - std::next(SILBasicBlock::iterator(origCallInst))); - resultBuilder.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(apply.getModule())); - - SmallVector origUses(origCallInst->getUses()); - for (Operand *operand : origUses) { - auto *extractInst = dyn_cast(operand->getUser()); - if (!extractInst) { - assert(origFnConv.getNumDirectSILResults() == 1); - assert(pass.valueStorageMap.contains(origCallInst)); - continue; - } - unsigned origResultIdx = extractInst->getFieldIndex(); - auto resultInfo = origFnConv.getResults()[origResultIdx]; - - if (extractInst->getType().isAddressOnly(*pass.F)) { - // Uses of indirect results will be rewritten by AddressOnlyUseRewriter. - assert(loweredCalleeConv.isSILIndirect(resultInfo)); - assert(pass.valueStorageMap.contains(extractInst)); - if (extractInst->use_empty()) - pass.markDead(extractInst); - continue; - } if (loweredCalleeConv.isSILIndirect(resultInfo)) { + if (result->getType().isAddressOnly(*pass.function)) { + // Mark the extract as rewritten now so we don't attempt to convert the + // call again. + pass.valueStorageMap.getStorage(result).markRewritten(); + return true; + } // This loadable indirect use should already be redirected to a load from // the argument storage and marked dead. - assert(extractInst->use_empty()); - continue; - } - // Either the new call instruction has only a single direct result, or we - // map the original tuple field to the new tuple field. 
- SILValue newValue = newCallInst; - if (loweredCalleeConv.getNumDirectSILResults() > 1) { - assert(newValue->getType().is()); - newValue = resultBuilder.createTupleExtract( - extractInst->getLoc(), newValue, - newDirectResultIndices[origResultIdx]); + assert(result->use_empty()); + return true; } - extractInst->replaceAllUsesWith(newValue); - extractInst->eraseFromParent(); + auto newResult = newDestructure + ? newDestructure->getResult(newDirectResultIdx) + : newPseudoResult; + ++newDirectResultIdx; + result->replaceAllUsesWith(newResult); + return true; + }; + visitCallMultiResults(oldDestructure, opaqueCalleeConv, visitOldCallResult); + assert(newDirectResultIdx == loweredCalleeConv.getNumDirectSILResults()); + + // If the oldDestructure produces any address-only results, then it will still + // have uses, those results are mapped to storage, and the destructure will be + // force-deleted later during deleteRewrittenInstructions. But if there are no + // address-only results, then all of the old destructure's uses will already + // be replaced. It must be force deleted now to avoid deleting it later as + // regular dead code and emitting a bad lifetime fixup for its owned operand. + if (isInstructionTriviallyDead(oldDestructure)) { + pass.deleter.forceDelete(oldDestructure); } - if (!pass.valueStorageMap.contains(origCallInst)) - pass.markDead(origCallInst); } //===----------------------------------------------------------------------===// -// ReturnRewriter - rewrite return instructions for indirect results. +// ReturnRewriter +// +// Rewrite return instructions for indirect results. 
//===----------------------------------------------------------------------===// class ReturnRewriter { AddressLoweringState &pass; + SILFunctionConventions opaqueFnConv; public: - ReturnRewriter(AddressLoweringState &pass) : pass(pass) {} + ReturnRewriter(AddressLoweringState &pass) + : pass(pass), opaqueFnConv(pass.function->getConventions()) {} void rewriteReturns(); protected: void rewriteReturn(ReturnInst *returnInst); + + void rewriteElement(SILValue oldResult, SILArgument *newResultArg, + SILBuilder &returnBuilder); }; void ReturnRewriter::rewriteReturns() { - for (TermInst *termInst : pass.returnInsts) { - // TODO: handle throws - rewriteReturn(cast(termInst)); + for (SILInstruction *termInst : pass.exitingInsts) { + if (auto *returnInst = dyn_cast(termInst)) + rewriteReturn(returnInst); + else + assert(isa(termInst)); } } void ReturnRewriter::rewriteReturn(ReturnInst *returnInst) { + auto &astCtx = pass.getModule()->getASTContext(); + auto typeCtx = pass.function->getTypeExpansionContext(); + + // Find the point before allocated storage has been deallocated. auto insertPt = SILBasicBlock::iterator(returnInst); - auto bbStart = returnInst->getParent()->begin(); - while (insertPt != bbStart) { - --insertPt; - if (!isa(*insertPt)) + for (auto bbStart = returnInst->getParent()->begin(); + insertPt != bbStart; --insertPt) { + if (!isa(*std::prev(insertPt))) break; } - SILBuilder B(insertPt); - B.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(returnInst->getModule())); + auto returnBuilder = pass.getBuilder(insertPt); // Gather direct function results. 
- unsigned numOrigDirectResults = - pass.F->getConventions().getNumDirectSILResults(); - SmallVector origDirectResultValues; - if (numOrigDirectResults == 1) - origDirectResultValues.push_back(returnInst->getOperand()); + unsigned numOldResults = opaqueFnConv.getNumDirectSILResults(); + SmallVector oldResults; + TupleInst *pseudoReturnVal = nullptr; + if (numOldResults == 1) + oldResults.push_back(returnInst->getOperand()); else { - auto *tupleInst = cast(returnInst->getOperand()); - origDirectResultValues.append(tupleInst->getElements().begin(), - tupleInst->getElements().end()); - assert(origDirectResultValues.size() == numOrigDirectResults); + pseudoReturnVal = cast(returnInst->getOperand()); + oldResults.append(pseudoReturnVal->getElements().begin(), + pseudoReturnVal->getElements().end()); + assert(oldResults.size() == numOldResults); } - SILFunctionConventions origFnConv(pass.F->getConventions()); - (void)origFnConv; - - // Convert each result. SmallVector newDirectResults; unsigned newResultArgIdx = pass.loweredFnConv.getSILArgIndexOfFirstIndirectResult(); + // Initialize the indirect result arguments and populate newDirectResults. for_each( - pass.F->getLoweredFunctionType()->getResults(), origDirectResultValues, - [&](SILResultInfo resultInfo, SILValue origDirectResultVal) { - // Assume that all original results are direct in SIL. - assert(!origFnConv.isSILIndirect(resultInfo)); - - if (pass.loweredFnConv.isSILIndirect(resultInfo)) { - assert(newResultArgIdx - < pass.loweredFnConv.getSILArgIndexOfFirstParam()); - - SILArgument *resultArg = B.getFunction().getArgument(newResultArgIdx); - SILType resultTy = origDirectResultVal->getType(); - if (resultTy.isAddressOnly(*pass.F)) { - ValueStorage &storage = - pass.valueStorageMap.getStorage(origDirectResultVal); - assert(storage.isRewritten()); - if (!storage.isProjection()) { - // Copy the result from local storage into the result argument. 
- SILValue resultAddr = storage.storageAddress; - B.createCopyAddr(returnInst->getLoc(), resultAddr, resultArg, - IsTake, IsInitialization); - } - } else { - // Store the result into the result argument. - B.createStore(returnInst->getLoc(), origDirectResultVal, resultArg, - StoreOwnershipQualifier::Unqualified); - } - ++newResultArgIdx; - } else { - // Record the direct result for populating the result tuple. - newDirectResults.push_back(origDirectResultVal); - } - }); + pass.function->getLoweredFunctionType()->getResults(), oldResults, + [&](SILResultInfo resultInfo, SILValue oldResult) { + // Assume that all original results are direct in SIL. + assert(!opaqueFnConv.isSILIndirect(resultInfo)); + if (!pass.loweredFnConv.isSILIndirect(resultInfo)) { + newDirectResults.push_back(oldResult); + return; + } + SILArgument *newResultArg = + pass.function->getArgument(newResultArgIdx); + rewriteElement(oldResult, newResultArg, returnBuilder); + ++newResultArgIdx; + }); + assert(newDirectResults.size() == pass.loweredFnConv.getNumDirectSILResults()); + assert(newResultArgIdx == pass.loweredFnConv.getSILArgIndexOfFirstParam()); + + // Generate a new return_inst for the new direct results. SILValue newReturnVal; if (newDirectResults.empty()) { - SILType emptyTy = SILType::getPrimitiveObjectType( - B.getModule().getASTContext().TheEmptyTupleType); - newReturnVal = B.createTuple(returnInst->getLoc(), emptyTy, {}); + SILType emptyTy = SILType::getPrimitiveObjectType(astCtx.TheEmptyTupleType); + newReturnVal = returnBuilder.createTuple(pass.genLoc(), emptyTy, {}); } else if (newDirectResults.size() == 1) { newReturnVal = newDirectResults[0]; } else { - newReturnVal = B.createTuple( - returnInst->getLoc(), - pass.loweredFnConv.getSILResultType(B.getTypeExpansionContext()), - newDirectResults); + newReturnVal = returnBuilder.createTuple(pass.genLoc(), + pass.loweredFnConv.getSILResultType(typeCtx), + newDirectResults); } + // Rewrite the returned value. 
SILValue origFullResult = returnInst->getOperand(); + assert(isPseudoReturnValue(origFullResult) == (pseudoReturnVal != nullptr)); + returnInst->setOperand(newReturnVal); - if (auto *fullResultInst = origFullResult->getDefiningInstruction()) { - if (!fullResultInst->hasUsesOfAnyResult()) - pass.markDead(fullResultInst); + // A pseudo return value is not be deleted during deleteRewrittenInstructions + // because it is not mapped ValueStorage. Delete it now since it's value are + // all consumed by newReturnVal. + if (pseudoReturnVal) { + pass.deleter.forceDelete(pseudoReturnVal); + } +} + +void ReturnRewriter::rewriteElement(SILValue oldResult, + SILArgument *newResultArg, + SILBuilder &returnBuilder) { + SILType resultTy = oldResult->getType(); + if (resultTy.isAddressOnly(*pass.function)) { + ValueStorage &storage = pass.valueStorageMap.getStorage(oldResult); + assert(storage.isRewritten); + SILValue resultAddr = storage.storageAddress; + if (resultAddr != newResultArg) { + // Copy the result from local storage into the result argument. + returnBuilder.createCopyAddr(pass.genLoc(), resultAddr, newResultArg, + IsTake, IsInitialization); + } + } else { + // Store the result into the result argument. + returnBuilder.createTrivialStoreOr(pass.genLoc(), oldResult, newResultArg, + StoreOwnershipQualifier::Init); } } //===----------------------------------------------------------------------===// -// AddressOnlyUseRewriter - rewrite opaque value uses. +// UseRewriter +// +// Rewrite opaque value uses in forward order--uses are rewritten before defs. 
//===----------------------------------------------------------------------===// namespace { -class AddressOnlyUseRewriter - : SILInstructionVisitor { - friend SILVisitorBase; - friend SILInstructionVisitor; +class UseRewriter : SILInstructionVisitor { + friend SILVisitorBase; + friend SILInstructionVisitor; AddressLoweringState &pass; - SILBuilder B; + SILBuilder builder; AddressMaterialization addrMat; - Operand *currOper; + Operand *use = nullptr; + + explicit UseRewriter(AddressLoweringState &pass, Operand *use) + : pass(pass), builder(pass.getBuilder(use->getUser()->getIterator())), + addrMat(pass, builder), use(use) {} public: - explicit AddressOnlyUseRewriter(AddressLoweringState &pass) - : pass(pass), B(*pass.F), addrMat(pass, B) { - B.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(pass.F->getModule())); - } + static void rewriteUse(Operand *use, AddressLoweringState &pass) { + // Special handling for the broken opened archetypes representation in which + // a single result represents both a value of the opened type and the + // metatype itself :/ + if (use->isTypeDependent()) + return; - void visitOperand(Operand *operand) { - currOper = operand; - visit(operand->getUser()); + UseRewriter(pass, use).visit(use->getUser()); } protected: + // If rewriting a use also rewrites the value defined by the user, then mark + // the defined value as rewritten. The defined value will not be revisited by + // DefRewriter. void markRewritten(SILValue oldValue, SILValue addr) { auto &storage = pass.valueStorageMap.getStorage(oldValue); + // getReusedStorageOperand() ensures that oldValue does not already have + // separate storage. So there's no need to delete its alloc_stack. 
+ assert(!storage.storageAddress || storage.storageAddress == addr); storage.storageAddress = addr; storage.markRewritten(); } - void beforeVisit(SILInstruction *I) { - LLVM_DEBUG(llvm::dbgs() << " REWRITE USE "; I->dump()); - - B.setInsertionPoint(I); - B.setCurrentDebugScope(I->getDebugScope()); + void beforeVisit(SILInstruction *inst) { + LLVM_DEBUG(llvm::dbgs() << "REWRITE USE "; inst->dump()); } - void visitSILInstruction(SILInstruction *I) { - LLVM_DEBUG(I->dump()); - llvm_unreachable("Unimplemented?!"); + void visitSILInstruction(SILInstruction *inst) { + inst->dump(); + llvm::report_fatal_error("^^^ Unimplemented opaque value use."); } + // Opaque call argument. void visitApplyInst(ApplyInst *applyInst) { - ApplyRewriter(applyInst, pass).rewriteIndirectParameter(currOper); + CallArgRewriter(applyInst, pass).rewriteIndirectArgument(use); + } + + void visitAssignInst(AssignInst *assignInst); + + void visitBeginBorrowInst(BeginBorrowInst *borrow); + + void visitEndBorrowInst(EndBorrowInst *end) {} + + void visitBranchInst(BranchInst *) { + pass.getPhiRewriter().materializeOperand(use); + + use->set(SILUndef::get(use->get()->getType(), *pass.function)); + } + + // Opaque checked cast source. + void visitCheckedCastValueBranchInst( + CheckedCastValueBranchInst *checkedBranchInst) { + // FIXME: Unimplemented + llvm::report_fatal_error("Unimplemented CheckCastValueBranch use."); } + // Copy from an opaque source operand. void visitCopyValueInst(CopyValueInst *copyInst) { - ValueStorage &storage = pass.valueStorageMap.getStorage(copyInst); - // Fold a copy into a store. 
- if (storage.isProjection() - && isa(storage.getComposedOperand()->getUser())) { - return; - } SILValue srcVal = copyInst->getOperand(); SILValue srcAddr = pass.valueStorageMap.getStorage(srcVal).storageAddress; + SILValue destAddr = addrMat.materializeAddress(copyInst); - B.createCopyAddr(copyInst->getLoc(), srcAddr, destAddr, IsNotTake, - IsInitialization); + if (destAddr != srcAddr) { + builder.createCopyAddr(copyInst->getLoc(), srcAddr, destAddr, IsNotTake, + IsInitialization); + } markRewritten(copyInst, destAddr); } - + void visitDebugValueInst(DebugValueInst *debugInst) { SILValue srcVal = debugInst->getOperand(); SILValue srcAddr = pass.valueStorageMap.getStorage(srcVal).storageAddress; - B.createDebugValueAddr(debugInst->getLoc(), srcAddr, - *debugInst->getVarInfo()); - pass.markDead(debugInst); + builder.createDebugValueAddr(debugInst->getLoc(), srcAddr, + *debugInst->getVarInfo()); + pass.deleter.forceDelete(debugInst); + } + + void visitDeinitExistentialValueInst( + DeinitExistentialValueInst *deinitExistential) { + // FIXME: Unimplemented + llvm::report_fatal_error("Unimplemented DeinitExsitentialValue use."); } - - void visitDestroyValueInst(DestroyValueInst *destroyInst) { - SILValue srcVal = destroyInst->getOperand(); + + void visitDestroyValueInst(DestroyValueInst *destroy) { + SILValue srcVal = destroy->getOperand(); SILValue srcAddr = pass.valueStorageMap.getStorage(srcVal).storageAddress; - B.createDestroyAddr(destroyInst->getLoc(), srcAddr); - pass.markDead(destroyInst); + builder.createDestroyAddr(destroy->getLoc(), srcAddr); + pass.deleter.forceDelete(destroy); + } + + void rewriteDestructure(SILInstruction *destructure); + + void visitDestructureStructInst(DestructureStructInst *destructure) { + rewriteDestructure(destructure); } - // Handle EnumInst on the def side to handle both opaque and - // loadable operands. 
+ void visitDestructureTupleInst(DestructureTupleInst *destructure) { + rewriteDestructure(destructure); + } + + // Enums are rewritten on the def side to handle both address-only and + // loadable payloads. An address-only payload implies an address-only Enum. void visitEnumInst(EnumInst *enumInst) {} - // Handle InitExistentialValue on the def side to handle both opaque and - // loadable operands. + // Handle InitExistentialValue on the def side because loadable values must + // also be copied into existential storage. void visitInitExistentialValueInst(InitExistentialValueInst *initExistential) {} + // Opening an opaque existential. Rewrite the opened existentials here on + // the use-side because it may produce either loadable or address-only + // types. + void visitOpenExistentialValueInst(OpenExistentialValueInst *openExistential); + + void visitOpenExistentialBoxValueInst( + OpenExistentialBoxValueInst *openExistentialBox) { + // FIXME: Unimplemented + llvm::report_fatal_error("Unimplemented OpenExistentialBox use."); + } + void visitReturnInst(ReturnInst *returnInst) { - // Returns are rewritten for any function with indirect results after opaque - // value rewriting. + // Returns are rewritten for any function with indirect results after + // opaque value rewriting. + } + + void visitSelectValueInst(SelectValueInst *selectInst) { + // FIXME: Unimplemented + llvm::report_fatal_error("Unimplemented SelectValue use."); } - void visitStoreInst(StoreInst *storeInst) { - SILValue srcVal = storeInst->getSrc(); - assert(currOper->get() == srcVal); + // Opaque enum operand to a switch_enum. 
+ void visitSwitchEnumInst(SwitchEnumInst *SEI); - ValueStorage &storage = pass.valueStorageMap.getStorage(srcVal); - SILValue srcAddr = storage.storageAddress; + void rewriteStore(SILValue srcVal, SILValue destAddr, + IsInitialization_t isInit); - IsTake_t isTakeFlag = IsTake; - assert(storeInst->getOwnershipQualifier() - == StoreOwnershipQualifier::Unqualified); + void visitStoreInst(StoreInst *storeInst); - if (storage.isProjection()) { - assert(!srcAddr); - auto *copyInst = cast(srcVal); - ValueStorage &srcStorage = - pass.valueStorageMap.getStorage(copyInst->getOperand()); - assert(!srcStorage.isProjection()); - srcAddr = srcStorage.storageAddress; - isTakeFlag = IsNotTake; - } - // Bitwise copy the value. Two locations now share ownership. This is - // modeled as a take-init. - B.createCopyAddr(storeInst->getLoc(), srcAddr, storeInst->getDest(), - isTakeFlag, IsInitialization); - pass.markDead(storeInst); + /// Emit end_borrows for a an incomplete BorrowedValue with only nonlifetime + /// ending uses. + void emitEndBorrows(SILValue value); + + void emitExtract(SingleValueInstruction *extractInst); + + // Extract from an opaque struct. + void visitStructExtractInst(StructExtractInst *extractInst); + + // Structs are rewritten on the def-side, where both the address-only and + // loadable elements that compose a struct can be handled. An address-only + // member implies an address-only Struct. + void visitStructInst(StructInst *structInst) {} + + // Opaque call argument. + void visitTryApplyInst(TryApplyInst *tryApplyInst) { + CallArgRewriter(tryApplyInst, pass).rewriteIndirectArgument(use); } - void visitTupleInst(TupleInst *tupleInst) { - // Tuples are rewritten on the def-side, where both direct and indirect - // elements are composed. + // Tuples are rewritten on the def-side, where both the address-only and + // loadable elements that compose a tuple can be handled. An address-only + // element implies an address-only Tuple. 
+ void visitTupleInst(TupleInst *tupleInst) {} + + // Extract from an opaque tuple. + void visitTupleExtractInst(TupleExtractInst *extractInst); + + void visitUncheckedBitwiseCast(UncheckedBitwiseCastInst *uncheckedCastInst) { + // FIXME: Unimplemented + llvm::report_fatal_error("Unimplemented UncheckedBitwiseCast use."); } - void visitTupleExtractInst(TupleExtractInst *extractInst) { - // Apply results are rewritten when the result definition is visited. - if (ApplySite::isa(currOper->get())) - return; + void visitUncheckedEnumDataInst(UncheckedEnumDataInst *enumDataInst); - // TODO: generate tuple_element_addr. - // generate copy_addr if we can't project. - llvm_unreachable("unimplemented."); + void visitUnconditionalCheckedCastValueInst( + UnconditionalCheckedCastValueInst *checkedCastInst) { + + // FIXME: Unimplemented + llvm::report_fatal_error("Unimplemented UnconditionalCheckedCast use."); } }; } // end anonymous namespace +void UseRewriter::rewriteDestructure(SILInstruction *destructure) { + for (auto result : destructure->getResults()) { + SILValue extractAddr = addrMat.materializeDefProjection(result); + if (result->getType().isAddressOnly(*pass.function)) { + assert(use == getProjectedDefOperand(result)); + markRewritten(result, extractAddr); + } else { + assert(!pass.valueStorageMap.contains(result)); + SILValue loadElement = builder.createTrivialLoadOr( + destructure->getLoc(), extractAddr, LoadOwnershipQualifier::Take); + + result->replaceAllUsesWith(loadElement); + } + } +} + +void UseRewriter::visitBeginBorrowInst(BeginBorrowInst *borrow) { + assert(use == getProjectedDefOperand(borrow)); + + // Mark the value as rewritten and use the operand's storage. + auto address = pass.valueStorageMap.getStorage(use->get()).storageAddress; + markRewritten(borrow, address); + + // Borrows are irrelevant unless they are marked lexical. 
+ if (borrow->isLexical()) { + if (auto *allocStack = dyn_cast(address)) { + allocStack->setIsLexical(); + return; + } + // Function arguments are inherently lexical. + if (isa(address)) + return; + + SWIFT_ASSERT_ONLY(address->dump()); + llvm_unreachable("^^^ unknown lexical address producer"); + } +} + +// Opening an opaque existential. Rewrite the opened existentials here on +// the use-side because it may produce either loadable or address-only +// types. +void UseRewriter::visitOpenExistentialValueInst( + OpenExistentialValueInst *openExistential) { + assert(use == getReusedStorageOperand(openExistential)); + SILValue srcAddr = pass.valueStorageMap.getStorage(use->get()).storageAddress; + + // Replace the module's openedArchetypesDef + pass.getModule()->willDeleteInstruction(openExistential); + + // Mutable access is always by address. + auto *openAddr = builder.createOpenExistentialAddr( + openExistential->getLoc(), srcAddr, + openExistential->getType().getAddressType(), + OpenedExistentialAccess::Immutable); + + SmallVector typeUses; + for (Operand *use : openExistential->getUses()) { + if (use->isTypeDependent()) { + typeUses.push_back(use); + } + } + for (Operand *use : typeUses) { + use->set(openAddr); + } + markRewritten(openExistential, openAddr); +} + +void UseRewriter::rewriteStore(SILValue srcVal, SILValue destAddr, + IsInitialization_t isInit) { + assert(use->get() == srcVal); + auto *storeInst = use->getUser(); + auto loc = storeInst->getLoc(); + + ValueStorage &storage = pass.valueStorageMap.getStorage(srcVal); + SILValue srcAddr = storage.storageAddress; + + IsTake_t isTake = IsTake; + if (auto *copy = dyn_cast(srcVal)) { + if (storage.isDefProjection) { + SILValue copySrcAddr = + pass.valueStorageMap.getStorage(copy->getOperand()).storageAddress; + assert(srcAddr == copySrcAddr && "folded copy should borrow storage"); + (void)copySrcAddr; + isTake = IsNotTake; + } + } + builder.createCopyAddr(loc, srcAddr, destAddr, isTake, isInit); + 
pass.deleter.forceDelete(storeInst); +} + +// If the source is a copy that projects storage from its def, then the copy +// semantics are handled here (by omitting the [take] flag from copy_addr). +void UseRewriter::visitStoreInst(StoreInst *storeInst) { + IsInitialization_t isInit; + auto qualifier = storeInst->getOwnershipQualifier(); + if (qualifier == StoreOwnershipQualifier::Init) + isInit = IsInitialization; + else { + assert(qualifier == StoreOwnershipQualifier::Assign); + isInit = IsNotInitialization; + } + rewriteStore(storeInst->getSrc(), storeInst->getDest(), isInit); +} + +void UseRewriter::visitAssignInst(AssignInst *assignInst) { + rewriteStore(assignInst->getSrc(), assignInst->getDest(), + IsNotInitialization); +} + +/// Emit end_borrows for a an incomplete BorrowedValue with only nonlifetime +/// ending uses. This function inserts end_borrows on the lifetime boundary. +void UseRewriter::emitEndBorrows(SILValue value) { + assert(BorrowedValue(value)); + + // Place end_borrows that cover the load_borrow uses. It is not necessary to + // cover the outer borrow scope of the extract's operand. If a lexical + // borrow scope exists for the outer value, which is now in memory, then + // its alloc_stack will be marked lexical, and the in-memory values will be + // kept alive until the end of the outer scope. + SmallVector usePoints; + findInnerTransitiveGuaranteedUses(value, &usePoints); + + SmallVector discoveredBlocks; + PrunedLiveness liveness(&discoveredBlocks); + for (auto *use : usePoints) { + assert(!use->isLifetimeEnding()); + liveness.updateForUse(use->getUser(), /*lifetimeEnding*/ false); + } + PrunedLivenessBoundary guaranteedBoundary; + guaranteedBoundary.compute(liveness); + guaranteedBoundary.visitInsertionPoints( + [&](SILBasicBlock::iterator insertPt) { + pass.getBuilder(insertPt).createEndBorrow(pass.genLoc(), value); + }); +} + +// Extract from an opaque struct or tuple. 
+void UseRewriter::emitExtract(SingleValueInstruction *extractInst) { + SILValue extractAddr = addrMat.materializeDefProjection(extractInst); + + if (extractInst->getType().isAddressOnly(*pass.function)) { + assert(use == getProjectedDefOperand(extractInst)); + markRewritten(extractInst, extractAddr); + return; + } + auto replaceUsesWithLoad = [&](SingleValueInstruction *oldInst, + SILValue load) { + oldInst->replaceAllUsesWith(load); + pass.deleter.forceDelete(oldInst); + }; + auto loc = extractInst->getLoc(); + if (extractInst->getType().isTrivial(*pass.function)) { + auto *load = + builder.createLoad(loc, extractAddr, LoadOwnershipQualifier::Trivial); + replaceUsesWithLoad(extractInst, load); + return; + } + if (Operand *use = extractInst->getSingleUse()) { + if (auto *copy = dyn_cast(use->getUser())) { + auto *load = + builder.createLoad(loc, extractAddr, LoadOwnershipQualifier::Copy); + replaceUsesWithLoad(copy, load); + return; + } + } + SILValue loadElement = + builder.emitLoadBorrowOperation(extractInst->getLoc(), extractAddr); + replaceUsesWithLoad(extractInst, loadElement); + emitEndBorrows(loadElement); +} + +void UseRewriter::visitStructExtractInst(StructExtractInst *extractInst) { + emitExtract(extractInst); +} + +// Extract from an opaque tuple. +void UseRewriter::visitTupleExtractInst(TupleExtractInst *extractInst) { + emitExtract(extractInst); +} + +// Rewrite switch_enum to switch_enum_addr. All associated block arguments are +// removed. +void UseRewriter::visitSwitchEnumInst(SwitchEnumInst * switchEnum) { + SILValue enumVal = switchEnum->getOperand(); + assert(use->get() == enumVal); + + SILValue enumAddr = pass.getMaterializedAddress(enumVal); + auto loc = switchEnum->getLoc(); + auto rewriteCase = [&](EnumElementDecl *caseDecl, SILBasicBlock *caseBB) { + // Nothing to do for unused case payloads. 
+ if (caseBB->getArguments().size() == 0) + return; + + assert(caseBB->getArguments().size() == 1); + SILArgument *caseArg = caseBB->getArguments()[0]; + + assert(&switchEnum->getOperandRef(0) == getReusedStorageOperand(caseArg)); + assert(caseDecl->hasAssociatedValues() && "caseBB has a payload argument"); + + SILBuilder caseBuilder = pass.getBuilder(caseBB->begin()); + auto *caseAddr = + caseBuilder.createUncheckedTakeEnumDataAddr(loc, enumAddr, caseDecl); + auto *caseLoad = caseBuilder.createTrivialLoadOr( + switchEnum->getLoc(), caseAddr, LoadOwnershipQualifier::Take); + caseArg->replaceAllUsesWith(caseLoad); + if (caseArg->getType().isAddressOnly(*pass.function)) { + // Remap caseArg to the new dummy load which will be deleted during + // deleteRewrittenInstructions. + pass.valueStorageMap.replaceValue(caseArg, caseLoad); + markRewritten(caseLoad, caseAddr); + } + caseBB->eraseArgument(0); + }; + + // TODO: The case list does not change. We should be able to avoid copying. + SmallVector, 8> cases; + SmallVector caseCounters; + + // Collect switch cases for rewriting and remove block arguments. 
+ for (unsigned caseIdx : range(switchEnum->getNumCases())) { + auto caseDeclAndBB = switchEnum->getCase(caseIdx); + EnumElementDecl *caseDecl = caseDeclAndBB.first; + SILBasicBlock *caseBB = caseDeclAndBB.second; + + cases.push_back(caseDeclAndBB); + caseCounters.push_back(switchEnum->getCaseCount(caseIdx)); + + rewriteCase(caseDecl, caseBB); + } + SILBasicBlock *defaultBB = nullptr; + auto defaultCounter = ProfileCounter(); + if (switchEnum->hasDefault()) { + defaultBB = switchEnum->getDefaultBB(); + defaultCounter = switchEnum->getDefaultCount(); + if (auto defaultDecl = switchEnum->getUniqueCaseForDefault()) { + rewriteCase(defaultDecl.get(), defaultBB); + } + } + auto builder = pass.getTermBuilder(switchEnum); + pass.deleter.forceDelete(switchEnum); + builder.createSwitchEnumAddr(loc, enumAddr, defaultBB, cases, + ArrayRef(caseCounters), + defaultCounter); +} + +void UseRewriter::visitUncheckedEnumDataInst( + UncheckedEnumDataInst *enumDataInst) { + assert(use == getReusedStorageOperand(enumDataInst)); + + assert(enumDataInst->getOwnershipKind() != OwnershipKind::Guaranteed); + + // unchecked_enum_data could be a def-projection. It is handled as a + // separate allocation to make it clear that it can't be + // rematerialized. This means that + auto srcAddr = pass.valueStorageMap.getStorage(use->get()).storageAddress; + + auto loc = enumDataInst->getLoc(); + auto elt = enumDataInst->getElement(); + auto destTy = enumDataInst->getType().getAddressType(); + auto *enumAddrInst = + builder.createUncheckedTakeEnumDataAddr(loc, srcAddr, elt, destTy); + + markRewritten(enumDataInst, enumAddrInst); +} + //===----------------------------------------------------------------------===// -// AddressOnlyDefRewriter - rewrite opaque value definitions. +// DefRewriter +// +// Rewrite opaque value definitions in forward order--defs are after uses. 
//===----------------------------------------------------------------------===// namespace { -class AddressOnlyDefRewriter - : SILInstructionVisitor { - friend SILVisitorBase; - friend SILInstructionVisitor; +class DefRewriter : SILInstructionVisitor { + friend SILVisitorBase; + friend SILInstructionVisitor; AddressLoweringState &pass; - SILBuilder B; + SILBuilder builder; AddressMaterialization addrMat; - ValueStorage *storage = nullptr; + ValueStorage &storage; -public: - explicit AddressOnlyDefRewriter(AddressLoweringState &pass) - : pass(pass), B(*pass.F), addrMat(pass, B) { - B.setSILConventions( - SILModuleConventions::getLoweredAddressConventions(pass.F->getModule())); + explicit DefRewriter(AddressLoweringState &pass, SILValue value, + SILBasicBlock::iterator insertPt) + : pass(pass), builder(pass.getBuilder(insertPt)), addrMat(pass, builder), + storage(pass.valueStorageMap.getStorage(value)) { + assert(!storage.isRewritten); } - void visitInst(SILInstruction *inst) { visit(inst); } +public: + static void rewriteValue(SILValue value, AddressLoweringState &pass) { + if (auto *inst = value->getDefiningInstruction()) { + DefRewriter(pass, value, inst->getIterator()).visit(inst); + + } else { + // function args are already rewritten. + auto *blockArg = cast(value); + auto insertPt = blockArg->getParent()->begin(); + DefRewriter(pass, value, insertPt).rewriteArg(blockArg); + } + } protected: - void beforeVisit(SILInstruction *I) { - // This cast succeeds beecause only specific instructions get added to - // the value storage map. - storage = &pass.valueStorageMap.getStorage(cast(I)); + // Set the storage address for an opaque block arg and mark it rewritten. 
+ void rewriteArg(SILPhiArgument *arg) { + LLVM_DEBUG(llvm::dbgs() << "REWRITE ARG "; arg->dump()); + if (storage.storageAddress) + LLVM_DEBUG(llvm::dbgs() << " STORAGE "; storage.storageAddress->dump()); - LLVM_DEBUG(llvm::dbgs() << "REWRITE DEF "; I->dump()); - if (storage->storageAddress) - LLVM_DEBUG(llvm::dbgs() << " STORAGE "; storage->storageAddress->dump()); + storage.storageAddress = addrMat.materializeAddress(arg); + } - B.setInsertionPoint(I); - B.setCurrentDebugScope(I->getDebugScope()); + void beforeVisit(SILInstruction *inst) { + LLVM_DEBUG(llvm::dbgs() << "REWRITE DEF "; inst->dump()); + if (storage.storageAddress) + LLVM_DEBUG(llvm::dbgs() << " STORAGE "; storage.storageAddress->dump()); } - void visitSILInstruction(SILInstruction *I) { - LLVM_DEBUG(I->dump()); - llvm_unreachable("Unimplemented?!"); + void visitSILInstruction(SILInstruction *inst) { + inst->dump(); + llvm::report_fatal_error("^^^ Unimplemented opaque value def."); } void visitApplyInst(ApplyInst *applyInst) { - assert(isa(applyInst) && - "beforeVisit assumes that ApplyInst is an SVI"); - assert(!storage->isRewritten()); // Completely rewrite the apply instruction, handling any remaining // (loadable) indirect parameters, allocating memory for indirect // results, and generating a new apply instruction. - ApplyRewriter rewriter(applyInst, pass); - rewriter.rewriteParameters(); - rewriter.convertApplyWithIndirectResults(); - } - - void visitCopyValueInst(CopyValueInst *copyInst) { - // A folded copy is not rewritten. - assert(storage->isProjection() || storage->isRewritten()); + CallArgRewriter(applyInst, pass).rewriteArguments(); + ApplyRewriter(applyInst, pass).convertApplyWithIndirectResults(); + } + + // Rewrite the apply for an indirect result. 
+ void visitDestructureTupleInst(DestructureTupleInst *destructure) { + SILValue srcVal = destructure->getOperand(); + assert(isPseudoCallResult(srcVal) && "destructure use should be rewritten"); + + FullApplySite apply; + if (auto *applyInst = dyn_cast(srcVal)) { + apply = FullApplySite::isa(applyInst); + } else { + auto *termInst = + SILArgument::isTerminatorResult(srcVal)->getTerminatorForResult(); + apply = FullApplySite::isa(termInst); + } + CallArgRewriter(apply, pass).rewriteArguments(); + ApplyRewriter(apply, pass).convertApplyWithIndirectResults(); } + // Define an opaque enum value. void visitEnumInst(EnumInst *enumInst) { - SILValue enumAddr; if (enumInst->hasOperand()) { - addrMat.initializeOperandMem(&enumInst->getOperandRef()); - - assert(storage->storageAddress); - enumAddr = storage->storageAddress; - } else - enumAddr = addrMat.materializeAddress(enumInst); - - B.createInjectEnumAddr(enumInst->getLoc(), enumAddr, - enumInst->getElement()); + // Handle operands here because loadable operands must also be copied. + addrMat.initializeOperand(&enumInst->getOperandRef()); + } + SILValue enumAddr = addrMat.materializeAddress(enumInst); - storage->markRewritten(); + builder.createInjectEnumAddr(enumInst->getLoc(), enumAddr, + enumInst->getElement()); } + // Define an existential. void visitInitExistentialValueInst( InitExistentialValueInst *initExistentialValue) { // Initialize memory for the operand which may be opaque or loadable. - addrMat.initializeOperandMem(&initExistentialValue->getOperandRef()); + addrMat.initializeOperand(&initExistentialValue->getOperandRef()); + } - assert(storage->storageAddress); - storage->markRewritten(); + // Project an opaque value out of a box-type existential. + void visitOpenExistentialBoxValueInst( + OpenExistentialBoxValueInst *openExistentialBox) { + // FIXME: Unimplemented + llvm::report_fatal_error("Unimplemented OpenExistentialBoxValue def."); } + // Load an opaque value. 
void visitLoadInst(LoadInst *loadInst) { - // Bitwise copy the value. Two locations now share ownership. This is - // modeled as a take-init. - SILValue addr = pass.valueStorageMap.getStorage(loadInst).storageAddress; + SILValue addr = addrMat.materializeAddress(loadInst); + IsTake_t isTake; + if (loadInst->getOwnershipQualifier() == LoadOwnershipQualifier::Take) + isTake = IsTake; + else { + assert(loadInst->getOwnershipQualifier() == LoadOwnershipQualifier::Copy); + isTake = IsNotTake; + } + // Dummy loads are already mapped to their storage address. if (addr != loadInst->getOperand()) { - B.createCopyAddr(loadInst->getLoc(), loadInst->getOperand(), addr, IsTake, - IsInitialization); + builder.createCopyAddr(loadInst->getLoc(), loadInst->getOperand(), addr, + isTake, IsInitialization); } - storage->markRewritten(); } + // Define an opaque struct. + void visitStructInst(StructInst *structInst) { + // For each element, initialize the operand's memory. Some struct elements + // may be loadable types. + for (Operand &operand : structInst->getAllOperands()) + addrMat.initializeOperand(&operand); + } + + // Define an opaque tuple. void visitTupleInst(TupleInst *tupleInst) { - ValueStorage &storage = pass.valueStorageMap.getStorage(tupleInst); - if (storage.isProjection() - && isa(storage.getComposedOperand()->getUser())) { - // For indirectly returned values, each element has its own storage. - return; - } // For each element, initialize the operand's memory. Some tuple elements // may be loadable types. 
- SILValue tupleAddr = addrMat.materializeAddress(tupleInst); - unsigned eltIdx = 0; - for (Operand &operand : tupleInst->getAllOperands()) { - SILType eltTy = operand.get()->getType(); - if (eltTy.isAddressOnly(*pass.F)) - addrMat.initializeOperandMem(&operand); - else { - auto *elementAddr = B.createTupleElementAddr( - tupleInst->getLoc(), tupleAddr, eltIdx, eltTy.getAddressType()); - B.createStore(tupleInst->getLoc(), operand.get(), elementAddr, - StoreOwnershipQualifier::Unqualified); - } - ++eltIdx; - } + for (Operand &operand : tupleInst->getAllOperands()) + addrMat.initializeOperand(&operand); } +}; +} // end anonymous namespace - void visitTupleExtractInst(TupleExtractInst *extractInst) { - // If the source is an opaque tuple, as opposed to a call result, then the - // extract is rewritten on the use-side. - if (storage->isRewritten()) - return; +//===----------------------------------------------------------------------===// +// Rewrite Opaque Values +//===----------------------------------------------------------------------===// - // This must be an indirect result for an apply that has not yet been - // rewritten. Rewrite the apply. - SILValue srcVal = extractInst->getOperand(); - ApplyRewriter(cast(srcVal), pass) - .convertApplyWithIndirectResults(); +// Rewrite applies with indirect paramters or results of loadable types which +// were not visited during opaque value rewritting. +static void rewriteIndirectApply(FullApplySite apply, + AddressLoweringState &pass) { + // If all indirect args were loadable, then they still need to be rewritten. + CallArgRewriter(apply, pass).rewriteArguments(); - assert(storage->storageAddress); + if (!apply.getSubstCalleeType()->hasIndirectFormalResults()) + return; + + // If the call has indirect results and wasn't already rewritten, rewrite it + // now. This handles try_apply, which is not rewritten when DefRewriter visits + // block arguments. It also handles apply with loadable indirect results. 
+ ApplyRewriter(apply, pass).convertApplyWithIndirectResults(); + + if (!apply.getInstruction()->isDeleted()) { + assert(!getCallMultiResult(apply.getPseudoResult()) + && "replaceDirectResults deletes the destructure"); + pass.deleter.forceDelete(apply.getInstruction()); } -}; -} // end anonymous namespace +} static void rewriteFunction(AddressLoweringState &pass) { - AddressOnlyDefRewriter defVisitor(pass); - AddressOnlyUseRewriter useVisitor(pass); + // During rewriting, storage references are stable. + pass.valueStorageMap.setStable(); + + // For each opaque value in forward order, rewrite its users and its defining + // instruction. + for (auto &valueAndStorage : pass.valueStorageMap) { + SILValue valueDef = valueAndStorage.value; + // Rewrite a def that wasn't already rewritten when handling its operands. + if (!valueAndStorage.storage.isRewritten) { + DefRewriter::rewriteValue(valueDef, pass); + valueAndStorage.storage.markRewritten(); + } + // Rewrite a use of any non-address value mapped to storage (does not + // include the already rewritten uses of indirect arguments). + if (valueDef->getType().isAddress()) + continue; - for (auto &valueStorageI : pass.valueStorageMap) { - SILValue valueDef = valueStorageI.first; + SmallVector uses(valueDef->getUses()); + for (Operand *oper : uses) { + UseRewriter::rewriteUse(oper, pass); + } + } + // Rewrite any applies with indirect parameters now that all such parameters + // are rewritten. If the apply had indirect results, it was already rewritten + // by the defVisitor. + for (auto optionalApply : pass.indirectApplies) { + if (optionalApply) { + rewriteIndirectApply(optionalApply.getValue(), pass); + } + } + // Rewrite this function's return value now that all opaque values within the + // function are rewritten. This still depends on a valid ValueStorage + // projection operands. 
+ if (pass.function->getLoweredFunctionType()->hasIndirectFormalResults()) + ReturnRewriter(pass).rewriteReturns(); +} + +// Given an array of terminator operand values, produce an array of +// operands with those corresponding to deadArgIndices stripped out. +static void filterDeadArgs(OperandValueArrayRef origArgs, + ArrayRef deadArgIndices, + SmallVectorImpl &newArgs) { + auto nextDeadArgI = deadArgIndices.begin(); + for (unsigned i : indices(origArgs)) { + if (i == *nextDeadArgI) { + ++nextDeadArgI; + continue; + } + newArgs.push_back(origArgs[i]); + } + assert(nextDeadArgI == deadArgIndices.end()); +} - // TODO: MultiValueInstruction: ApplyInst - if (auto *defInst = dyn_cast(valueDef)) - defVisitor.visitInst(defInst); +// Rewrite a BranchInst omitting dead arguments. +static void removeBranchArgs(BranchInst *branch, + SmallVectorImpl &deadArgIndices, + AddressLoweringState &pass) { - SmallVector uses(valueDef->getUses()); - for (Operand *oper : uses) - useVisitor.visitOperand(oper); - } - - // Rewrite any remaining (loadable) indirect parameters. - for (ApplySite apply : pass.indirectApplies) { - // Calls with indirect formal results have already been rewritten. - if (apply.getSubstCalleeType()->hasIndirectFormalResults()) { - bool isRewritten = false; - visitCallResults(apply, [&](SILValue result) { - if (result->getType().isAddressOnly(*pass.F)) { - assert(pass.valueStorageMap.getStorage(result).isRewritten()); - isRewritten = true; - return false; - } - return true; - }); - if (!isRewritten) { - ApplyRewriter rewriter(apply, pass); - rewriter.rewriteParameters(); - rewriter.convertApplyWithIndirectResults(); - continue; + llvm::SmallVector branchArgs; + filterDeadArgs(branch->getArgs(), deadArgIndices, branchArgs); + + pass.getBuilder(branch->getIterator()) + .createBranch(branch->getLoc(), branch->getDestBB(), branchArgs); + pass.deleter.forceDelete(branch); +} + +// Remove opaque phis. Their inputs have already been substituted with Undef. 
+static void removeOpaquePhis(SILBasicBlock *bb, AddressLoweringState &pass) { + if (bb->isEntry()) + return; + + SmallVector deadArgIndices; + for (auto *bbArg : bb->getArguments()) { + if (bbArg->getType().isAddressOnly(*pass.function)) + deadArgIndices.push_back(bbArg->getIndex()); + } + if (deadArgIndices.empty()) + return; + + // Iterate while modifying the predecessor's terminators. + for (auto *predecessor : bb->getPredecessorBlocks()) { + auto *branch = cast(predecessor->getTerminator()); + removeBranchArgs(branch, deadArgIndices, pass); + } + // erase in reverse to avoid index invalidation. + while (!deadArgIndices.empty()) { + bb->eraseArgument(deadArgIndices.pop_back_val()); + } +} + +// Instructions that use an address-only value without producing one are already +// deleted. The rest of the address-only definitions are now removed bottom-up +// by visiting valuestorageMap. +// +// Phis are removed here after all other instructions. +static void deleteRewrittenInstructions(AddressLoweringState &pass) { + // Add the rest of the instructions to the dead list in post order. + for (auto &valueAndStorage : llvm::reverse(pass.valueStorageMap)) { + SILValue val = valueAndStorage.value; + ValueStorage &storage = valueAndStorage.storage; + + assert(&pass.valueStorageMap.getStorage(val) == &valueAndStorage.storage + && "invalid storage map"); + + // Returned tuples and multi-result calls are not in the + // valueStorageMap. Everything else must have been rewritten. + assert(storage.isRewritten && "opaque value has not been rewritten"); + + // If the storage was unused, e.g. because all uses were projected into + // users, then delete the allocation. 
+ if (auto *allocInst = storage.storageAddress->getDefiningInstruction()) { + pass.deleter.deleteIfDead(allocInst); + } + auto *deadInst = val->getDefiningInstruction(); + if (!deadInst || deadInst->isDeleted()) + continue; + + if (auto *destructure = dyn_cast(deadInst)) { + auto tupleVal = destructure->getOperand(); + if (auto *applyInst = dyn_cast(tupleVal)) { + deadInst = applyInst; } } - ApplyRewriter(apply, pass).rewriteParameters(); + LLVM_DEBUG(llvm::dbgs() << "DEAD "; deadInst->dump()); + if (!isa(deadInst)) { + pass.deleter.forceDeleteWithUsers(deadInst); + continue; + } + // willDeleteInstruction was already called for open_existential_value to + // update the registered type. Carry out the remaining deletion steps. + deadInst->getParent()->remove(deadInst); + pass.getModule()->scheduleForDeletion(deadInst); } - if (pass.F->getLoweredFunctionType()->hasIndirectFormalResults()) - ReturnRewriter(pass).rewriteReturns(); + + pass.valueStorageMap.clear(); + + // Remove block args after removing all instructions that may use them. + for (auto &bb : *pass.function) + removeOpaquePhis(&bb, pass); + + pass.deleter.cleanupDeadInstructions(); } //===----------------------------------------------------------------------===// -// AddressLowering: Top-Level Function Transform. +// AddressLowering: Module Pass //===----------------------------------------------------------------------===// namespace { +// Note: the only reason this is not a FunctionTransform is to change the SIL +// stage for all functions at once. class AddressLowering : public SILModuleTransform { - /// The entry point to this function transformation. + /// The entry point to this module transformation. 
void run() override; void runOnFunction(SILFunction *F); }; } // end anonymous namespace -void AddressLowering::runOnFunction(SILFunction *F) { - auto *DA = PM->getAnalysis(); +void AddressLowering::runOnFunction(SILFunction *function) { + if (!function->isDefinition()) + return; + + assert(function->hasOwnership() && "SIL opaque values requires OSSA"); + + PrettyStackTraceSILFunction FuncScope("address-lowering", function); + + LLVM_DEBUG(llvm::dbgs() << "Address Lowering: " << function->getName() + << "\n"); - AddressLoweringState pass(F, DA->get(F)); + // Ensure that blocks can be processed in RPO order. + removeUnreachableBlocks(*function); - // Rewrite function args and insert alloc_stack/dealloc_stack. + auto *dominance = PM->getAnalysis(); + + AddressLoweringState pass(function, dominance->get(function)); + + // ## Step #1: Map opaque values + // + // First, rewrite this function's arguments and return values, then populate + // pass.valueStorageMap with an entry for each address-only value. + prepareValueStorage(pass); + + // ## Step #2: Allocate storage + // + // For each address-only value mapped in step #1, either create an + // alloc_stack/dealloc_stack pair, or mark its ValueStorage entry as a + // def-projection out of its operand's def or a use projection into its + // composing use or into a phi (branch operand). OpaqueStorageAllocation allocator(pass); allocator.allocateOpaqueStorage(); - LLVM_DEBUG(llvm::dbgs() << "\nREWRITING: " << F->getName(); F->dump()); + LLVM_DEBUG(llvm::dbgs() << "Finished allocating storage.\n"; function->dump(); + pass.valueStorageMap.dump()); - // Rewrite instructions with address-only operands or results. + // ## Step #3. Rewrite opaque values + // + // Rewrite all instructions that either define or use an address-only value. + // Creates new '_addr' variants of instructions, obtaining the storage + // address from the 'valueStorageMap'. 
This materializes projections in + // forward order, setting 'storageAddress' for each projection as it goes. rewriteFunction(pass); - invalidateAnalysis(F, SILAnalysis::InvalidationKind::Instructions); - - // Instructions that were explicitly marked dead should already have no - // users. - // - // Add the rest of the instructions to the dead list in post order. - // FIXME: make sure we cleaned up address-only BB arguments. - for (auto &valueStorageI : llvm::reverse(pass.valueStorageMap)) { - // TODO: MultiValueInstruction: ApplyInst - auto *deadInst = dyn_cast(valueStorageI.first); - if (!deadInst) - continue; + deleteRewrittenInstructions(pass); - LLVM_DEBUG(llvm::dbgs() << "DEAD "; deadInst->dump()); -#ifndef NDEBUG - for (auto result : deadInst->getResults()) - for (Operand *operand : result->getUses()) - assert(pass.instsToDelete.count(operand->getUser())); -#endif - pass.instsToDelete.insert(deadInst); - } - pass.valueStorageMap.clear(); + StackNesting::fixNesting(function); - // Delete instructions in postorder - recursivelyDeleteTriviallyDeadInstructions(pass.instsToDelete.takeVector(), - true); + // The CFG may change because of criticalEdge splitting during + // createStackAllocation or StackNesting. + invalidateAnalysis(function, + SILAnalysis::InvalidationKind::BranchesAndInstructions); } -/// The entry point to this function transformation. +/// The entry point to this module transformation. void AddressLowering::run() { if (getModule()->useLoweredAddresses()) return; diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.h b/lib/SILOptimizer/Mandatory/AddressLowering.h new file mode 100644 index 0000000000000..e4c6ae79b0c2e --- /dev/null +++ b/lib/SILOptimizer/Mandatory/AddressLowering.h @@ -0,0 +1,282 @@ +//===--- AddressLowering.h - Lower SIL address-only types. ----------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2022 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "swift/SIL/SILArgument.h" +#include "swift/SIL/SILInstruction.h" +#include "swift/SIL/SILValue.h" +#include "llvm/ADT/DenseMap.h" + +namespace swift { + +/// Track a value's storage. Stages in the storage life-cycle: +/// +/// 1. Unallocated +/// +/// 2. Allocated. Either (a) 'storageAddress' is an alloc_stack, or (b) +/// 'projectedStorageID' refers to a different ValueStorage, which recursively +/// leads to a valid 'storageAddress'. +/// +/// 3. Materialized. 'storageAddress' is valid. Address projections have been +/// emitted at the point that this value is defined. +/// +/// 4. Rewritten. The definition of this address-only value is fully translated +/// into lowered SIL. Instructions are typically materialized and rewritten at +/// the same time. A indirect result, however, is materialized as soon as its +/// alloc_stack is emitted, but only rewritten once the call itself is +/// rewritten. +/// +/// A projection may project out of an operand's definition (def-projection). +/// After allocation, before materialization or rewriting, we may have: +/// +/// %result_addr = alloc_stack // storage for %result +/// %result = apply () -> @out T +/// %extract = struct_extact %result // def-projection of %result +/// +/// Or, a projection may project into a composing use (use-projection): +/// +/// %struct_addr = alloc_stack // storage for %struct +/// %result = apply () -> @out T // use-projection of %struct at operand #0 +/// %struct = struct %result +/// +/// A phi-projection is a use projection that projects its entire value +/// through a phi rather than into a composing use. It has an invalid +/// 'projectedOperandNum'. 
+///
+/// Operations that destructively reuse storage (open_existential_value,
+/// unchecked_enum_data, and switch_enum) are not considered storage
+/// projections. Instead, these values have no ValueStorage but are rewritten to
+/// directly reuse their operand's storage.
+///
+/// To materialize projections, address lowering follows the original def-use
+/// edges for address-only values. Consequently, values that have storage cannot
+/// be removed from SIL or from the storage map until rewriting is
+/// complete. Mapped values can, however, be substituted on-the-fly by emitting
+/// a place-holder value and updating the map entry. This works because the
+/// value storage map holds no direct references to any SIL entities, such as
+/// Operands or SILValues.
+struct ValueStorage {
+  enum : uint32_t { InvalidID = uint32_t(~0) };
+  enum : uint16_t { InvalidOper = uint16_t(~0) };
+
+  /// The final address of this storage after rewriting the SIL. For values
+  /// linked to their own storage, this is set during storage allocation to an
+  /// alloc_stack or indirect function argument. For projections, it is only set
+  /// after materialization (during instruction rewriting).
+  SILValue storageAddress;
+
+  /// When either isDefProjection or isUseProjection is set, this refers to the
+  /// storage whose "def" this value projects out of or whose operand this
+  /// storage projects into via its "use".
+  uint32_t projectedStorageID;
+
+  /// For use-projections, identifies the operand index of the composing use.
+  /// Only valid for non-phi use projections.
+  uint16_t projectedOperandNum;
+
+  /// Projection out of a storage def. e.g. this value is a destructure.
+  unsigned isDefProjection : 1;
+
+  /// Projection into a composing use or phi. e.g. this value is used by a
+  /// struct, tuple, enum, or branch.
+  unsigned isUseProjection : 1;
+
+  // The definition of this value is fully translated to lowered SIL.
+ unsigned isRewritten : 1; + + // This is a use-projection into an enum. Tracked to avoid projecting enums + // across phis, which would result in piecewise initialization. + unsigned initializesEnum : 1; + + ValueStorage() { clear(); } + + void clear() { + storageAddress = SILValue(); + projectedStorageID = InvalidID; + projectedOperandNum = InvalidOper; + isUseProjection = false; + isDefProjection = false; + isRewritten = false; + initializesEnum = false; + } + + bool isAllocated() const { + return storageAddress || isUseProjection || isDefProjection; + } + + bool isProjection() const { return isUseProjection || isDefProjection; } + + bool isPhiProjection() const { + return isUseProjection && projectedOperandNum == InvalidOper; + } + + bool isComposingUseProjection() const { + return isUseProjection && projectedOperandNum != InvalidOper; + } + + void markRewritten() { + assert(storageAddress); + isRewritten = true; + } + + SILValue getMaterializedAddress() const { + assert(isRewritten && "storage has not been materialized"); + return storageAddress; + } +}; + +/// Map each opaque/resilient SILValue to its abstract storage. +/// Iteration guarantees RPO order. +/// +/// Mapped values are expected to be created in a single RPO pass. "erase" is +/// unsupported. Values must be replaced using 'replaceValue()'. +class ValueStorageMap { + struct ValueStoragePair { + SILValue value; + ValueStorage storage; + ValueStoragePair(SILValue v, ValueStorage s) : value(v), storage(s) {} + }; + typedef std::vector ValueVector; + // Hash of values to ValueVector indices. + typedef llvm::DenseMap ValueHashMap; + + ValueVector valueVector; + ValueHashMap valueHashMap; + + // True after valueVector is done growing, so ValueStorage references will no + // longer be invalidated. 
+ SWIFT_ASSERT_ONLY_DECL(bool stableStorage = false); + +public: + bool empty() const { return valueVector.empty(); } + + void clear() { + valueVector.clear(); + valueHashMap.clear(); + } + + /// Iterate over value storage in RPO order. Once we begin erasing + /// instructions, some entries could become invalid. ValueStorage validity can + /// be checked with valueStorageMap.contains(value). + ValueVector::iterator begin() { return valueVector.begin(); } + + ValueVector::iterator end() { return valueVector.end(); } + + ValueVector::reverse_iterator rbegin() { return valueVector.rbegin(); } + + ValueVector::reverse_iterator rend() { return valueVector.rend(); } + + bool contains(SILValue value) const { + return valueHashMap.find(value) != valueHashMap.end(); + } + + unsigned getOrdinal(SILValue value) const { + auto hashIter = valueHashMap.find(value); + assert(hashIter != valueHashMap.end() && "Missing SILValue"); + return hashIter->second; + } + + ValueStorage &getStorage(SILValue value) { + return valueVector[getOrdinal(value)].storage; + } + const ValueStorage &getStorage(SILValue value) const { + return valueVector[getOrdinal(value)].storage; + } + + const ValueStorage *getStorageOrNull(SILValue value) const { + auto iter = valueHashMap.find(value); + if (iter == valueHashMap.end()) + return nullptr; + + return &valueVector[iter->second].storage; + } + + void setStable() { SWIFT_ASSERT_ONLY(stableStorage = true); } + + /// Given storage for a projection, return the projected storage by following + /// single level of projected storage. The returned storage may + /// recursively be a another projection. + ValueStoragePair &getProjectedStorage(const ValueStorage &storage) { + assert(storage.isProjection()); + return valueVector[storage.projectedStorageID]; + } + + /// Return the non-projection storage that the given storage ultimately refers + /// to by following all projections. After allocation, this storage always has + /// a valid address. 
+ const ValueStorage &getBaseStorage(const ValueStorage &storage) { + if (storage.isDefProjection || storage.isUseProjection) + return getBaseStorage(getProjectedStorage(storage).storage); + + return storage; + } + + /// Return the non-projection storage that the given storage ultimately refers + /// to by following all projections. + const ValueStorage &getBaseStorage(SILValue value) { + return getBaseStorage(getStorage(value)); + } + + /// Return the non-projection storage that this storage refers to. If this + /// storage holds an Enum or any intermediate storage that projects into this + /// storage holds an Enum, then return nullptr. + const ValueStorage *getNonEnumBaseStorage(const ValueStorage &storage) { + if (storage.initializesEnum) + return nullptr; + + if (storage.isUseProjection) { + auto &storageAndValue = getProjectedStorage(storage); + return getNonEnumBaseStorage(storageAndValue.storage); + } + assert(!storage.isDefProjection && "def projections should not reach here"); + return &storage; + } + + /// Return the non-projection storage that this storage refers to, or nullptr + /// if \p allowInitEnum is true and the storage initializes an Enum. + const ValueStorage *getBaseStorage(SILValue value, bool allowInitEnum) { + if (allowInitEnum) + return &getBaseStorage(value); + + return getNonEnumBaseStorage(getStorage(value)); + } + + /// Insert a value in the map, creating a ValueStorage object for it. This + /// must be called in RPO order. + ValueStorage &insertValue(SILValue value); + + /// Replace a value that is mapped to storage with another value. This allows + /// limited rewritting of original address-only values. For example, block + /// arguments can be replaced with fake loads in order to rewrite their + /// corresponding terminator. + void replaceValue(SILValue oldValue, SILValue newValue); + + /// Record a storage projection from the source of the given operand into its + /// use (e.g. struct_extract, tuple_extract, switch_enum). 
+ void recordDefProjection(Operand *oper, SILValue projectedValue); + + /// Record a storage projection from the use of the given operand into the + /// operand's source. (e.g. Any value used by a struct, tuple, or enum may + /// project storage into its use). + void recordComposingUseProjection(Operand *oper, SILValue userValue); + + // Mark a phi operand value as coalesced with the phi storage. + void recordPhiUseProjection(Operand *oper, SILPhiArgument *phi); + + /// Return true \p oper projects into its use's aggregate storage. + bool isComposingUseProjection(Operand *oper) const; + +#ifndef NDEBUG + void dump(); +#endif +}; + +} // namespace swift diff --git a/lib/SILOptimizer/Mandatory/CMakeLists.txt b/lib/SILOptimizer/Mandatory/CMakeLists.txt index 927259804c56d..904874a03f74a 100644 --- a/lib/SILOptimizer/Mandatory/CMakeLists.txt +++ b/lib/SILOptimizer/Mandatory/CMakeLists.txt @@ -4,6 +4,7 @@ target_sources(swiftSILOptimizer PRIVATE AddressLowering.cpp CapturePromotion.cpp ClosureLifetimeFixup.cpp + PhiStorageOptimizer.cpp ConstantPropagation.cpp DefiniteInitialization.cpp DIMemoryUseCollector.cpp diff --git a/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp new file mode 100644 index 0000000000000..4e4af608679a3 --- /dev/null +++ b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp @@ -0,0 +1,237 @@ +//===--- PhiStorageOptimizer.cpp - Phi storage optimizer ------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2021 Apple Inc. 
and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+///
+/// PhiStorageOptimizer implements an analysis used by AddressLowering
+/// to reuse storage across block arguments.
+///
+/// TODO: This does not yet coalesce the copy_value instructions that produce a
+/// phi operand. Such a copy implies that both the operand and phi value are
+/// live past the phi. Nonetheless, they could still be coalesced as
+/// follows... First coalesce all direct phi operands. Then transitively
+/// coalesce copies by redoing the liveness traversal from the uses of the copy.
+///
+/// TODO: This approach uses on-the-fly liveness discovery for all incoming
+/// values at once. It requires no storage for liveness. Hopefully this is
+/// sufficient for -Onone. At -O, we could explore implementing strong phi
+/// elimination. However, that depends on the ability to perform interference
+/// checks between arbitrary storage locations, which requires computing and
+/// storing liveness per-storage location.
+///
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "address-lowering"
+
+#include "PhiStorageOptimizer.h"
+#include "swift/SIL/BasicBlockDatastructures.h"
+#include "swift/SIL/SILBasicBlock.h"
+#include "swift/SIL/SILInstruction.h"
+
+using namespace swift;
+
+namespace swift {
+
+/// An analysis used by AddressLowering to reuse phi storage.
+///
+/// Populates CoalescedPhi::coalescedOperands with all phi operands that can
+/// reuse the phi's storage.
+class PhiStorageOptimizer {
+  PhiValue phi;
+  const ValueStorageMap &valueStorageMap;
+
+  CoalescedPhi &coalescedPhi;
+
+  BasicBlockSet occupiedBlocks;
+
+public:
+  PhiStorageOptimizer(PhiValue phi, const ValueStorageMap &valueStorageMap,
+                      CoalescedPhi &coalescedPhi)
+      : phi(phi), valueStorageMap(valueStorageMap), coalescedPhi(coalescedPhi),
+        occupiedBlocks(getFunction()) {}
+
+  SILFunction *getFunction() const { return phi.phiBlock->getParent(); }
+
+  void optimize();
+
+protected:
+  bool hasUseProjection(SILInstruction *defInst);
+  bool canCoalesceValue(SILValue incomingVal);
+  void tryCoalesceOperand(SILBasicBlock *incomingPred);
+  bool recordUseLiveness(SILValue incomingVal, BasicBlockSetVector &liveBlocks);
+};
+
+} // namespace swift
+
+void CoalescedPhi::coalesce(PhiValue phi,
+                            const ValueStorageMap &valueStorageMap) {
+  assert(empty() && "attempt to recoalesce the same phi");
+
+  PhiStorageOptimizer(phi, valueStorageMap, *this).optimize();
+}
+
+/// Optimize phi storage by coalescing phi operands.
+///
+/// Finds all non-interfering phi operands and adds them to the result's
+/// coalescedOperands. The algorithm can be described in the abstract as follows
+/// (assuming no critical edges):
+///
+/// All blocks are in one of three states at any point:
+/// - clean (not present in the live or occupied set)
+/// - live
+/// - occupied
+///
+/// All blocks start clean.
+///
+/// For each incoming value:
+///
+///   For all uses of the current incoming value:
+///
+///     Scan the CFG backward following predecessors.
+///     If the current block is:
+///
+///       Clean: mark it live and continue scanning.
+///
+///       Live: stop scanning and continue with the next use.
+///
+///       Occupied: record interference, stop scanning, continue to next use.
+///
+/// If no occupied blocks were reached, mark this phi operand coalesced. Its
+/// storage can be projected from the phi storage.
+///
+/// Mark all live blocks occupied.
+///
+/// In the end, we have a set of non-interfering incoming values that can reuse
+/// the phi's storage.
+void PhiStorageOptimizer::optimize() {
+  // The single incoming value case always projects storage.
+  if (auto *predecessor = phi.phiBlock->getSinglePredecessorBlock()) {
+    coalescedPhi.coalescedOperands.push_back(phi.getOperand(predecessor));
+    return;
+  }
+  occupiedBlocks.insert(phi.phiBlock);
+  for (auto *incomingPred : phi.phiBlock->getPredecessorBlocks()) {
+    tryCoalesceOperand(incomingPred);
+  }
+}
+
+// Return true if any of \p defInst's operands are composing use projections
+// into \p defInst's storage.
+bool PhiStorageOptimizer::hasUseProjection(SILInstruction *defInst) {
+  for (Operand &oper : defInst->getAllOperands()) {
+    if (valueStorageMap.isComposingUseProjection(&oper))
+      return true;
+  }
+  return false;
+}
+
+// Return true if \p incomingVal can be coalesced with this phi ignoring
+// possible interference. Simply determine whether storage reuse is possible.
+//
+// Precondition: \p incomingVal is an operand of this phi.
+bool PhiStorageOptimizer::canCoalesceValue(SILValue incomingVal) {
+  // A Phi must not project from storage that was initialized on a path that
+  // reaches the phi because other uses of the storage may interfere with the
+  // phi. A phi may, however, be a composing use projection.
+  assert(!valueStorageMap.getStorage(phi.getValue()).isDefProjection
+         && !valueStorageMap.getStorage(phi.getValue()).isPhiProjection());
+
+  auto &incomingStorage = valueStorageMap.getStorage(incomingVal);
+
+  // If the incoming use is pre-allocated it can't be coalesced.
+  // This also handles incoming values that are already coalesced with
+  // another use.
+  //
+  // Coalescing use projections from incomingVal into its other non-phi uses
+  // would require recursively following uses across projections when
+  // computing liveness.
+  if (incomingStorage.isProjection())
+    return false;
+
+  auto *defInst = incomingVal->getDefiningInstruction();
+  if (!defInst) {
+    // Indirect function arguments were replaced by loads.
+    assert(!isa(incomingVal));
+    // Do not coalesce a phi with other phis. This would require liveness
+    // analysis of the whole phi web before coalescing phi operands.
+    return false;
+  }
+  assert(incomingStorage.isAllocated() && "nonphi must be allocated");
+
+  // Don't coalesce an incoming value unless its storage is from a stack
+  // allocation, which can be replaced with another alloc_stack.
+  if (!isa(incomingStorage.storageAddress))
+    return false;
+
+  // Make sure that the incomingVal is not coalesced with any of its operands.
+  //
+  // Handling incomingValues whose operands project into them would require
+  // recursively finding the set of value definitions and their dominating defBB
+  // instead of simply incomingVal->getParentBlock().
+  if (hasUseProjection(defInst))
+    return false;
+
+  return true;
+}
+
+// Process a single incoming phi operand. Compute the value's liveness while
+// checking for interference. If no interference exists, mark it coalesced.
+void PhiStorageOptimizer::tryCoalesceOperand(SILBasicBlock *incomingPred) {
+  Operand *incomingOper = phi.getOperand(incomingPred);
+  SILValue incomingVal = incomingOper->get();
+
+  if (!canCoalesceValue(incomingVal))
+    return;
+
+  BasicBlockSetVector liveBlocks(getFunction());
+  if (!recordUseLiveness(incomingVal, liveBlocks))
+    return;
+
+  for (auto *block : liveBlocks) {
+    occupiedBlocks.insert(block);
+  }
+  assert(occupiedBlocks.contains(incomingPred));
+  coalescedPhi.coalescedOperands.push_back(incomingOper);
+}
+
+// Record liveness generated by uses of \p incomingVal.
+//
+// Return true if no interference was detected along the way.
+bool PhiStorageOptimizer::recordUseLiveness(SILValue incomingVal,
+                                            BasicBlockSetVector &liveBlocks) {
+  assert(liveBlocks.empty());
+
+  // Stop liveness traversal at defBB.
+ SILBasicBlock *defBB = incomingVal->getParentBlock(); + for (auto *use : incomingVal->getUses()) { + StackList liveBBWorklist(getFunction()); + + auto visitLiveBlock = [&](SILBasicBlock *liveBB) { + if (occupiedBlocks.contains(liveBB)) + return false; + + if (liveBlocks.insert(liveBB) && liveBB != defBB) { + liveBBWorklist.push_back(liveBB); + } + return true; + }; + if (!visitLiveBlock(use->getUser()->getParent())) + return false; + + while (!liveBBWorklist.empty()) { + auto *succBB = liveBBWorklist.pop_back_val(); + for (auto *predBB : succBB->getPredecessorBlocks()) { + if (!visitLiveBlock(predBB)) + return false; + } + } + } + return true; +} diff --git a/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.h b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.h new file mode 100644 index 0000000000000..07bae9c484033 --- /dev/null +++ b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.h @@ -0,0 +1,51 @@ +//===--- PhiStorageOptimizer.h - Phi storage optimizer --------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2021 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// This file defines PhiStorageOptimizer, a utility for use with the +/// mandatory AddressLowering pass. 
+/// +//===----------------------------------------------------------------------===// + +#include "AddressLowering.h" +#include "swift/SIL/SILArgument.h" +#include "swift/SIL/SILBasicBlock.h" +#include "swift/SIL/SILValue.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" + +namespace swift { + +class CoalescedPhi { + friend class PhiStorageOptimizer; + + SmallVector coalescedOperands; + + CoalescedPhi(const CoalescedPhi &) = delete; + CoalescedPhi &operator=(const CoalescedPhi &) = delete; + +public: + CoalescedPhi() = default; + CoalescedPhi(CoalescedPhi &&) = default; + CoalescedPhi &operator=(CoalescedPhi &&) = default; + + void coalesce(PhiValue phi, const ValueStorageMap &valueStorageMap); + + bool empty() const { return coalescedOperands.empty(); } + + ArrayRef getCoalescedOperands() const { return coalescedOperands; } + + SILInstruction::OperandValueRange getCoalescedValues() const { + return SILInstruction::getOperandValues(getCoalescedOperands()); + } +}; + +} // namespace swift diff --git a/test/IRGen/opaque_values_irgen.sil b/test/IRGen/opaque_values_irgen.sil index 5ed4a8fd374d9..3e4d6f8d447f4 100644 --- a/test/IRGen/opaque_values_irgen.sil +++ b/test/IRGen/opaque_values_irgen.sil @@ -2,7 +2,7 @@ import Builtin -sil_stage canonical +sil_stage raw // CHECK: define hidden swiftcc void @f010_irgen_identity(%swift.opaque* noalias nocapture sret({{.*}}) %0, %swift.opaque* noalias nocapture %1, %swift.type* %T) // CHECK: entry: @@ -12,7 +12,7 @@ sil_stage canonical // CHECK: %{{.*}} = call %swift.opaque* %initializeWithTake(%swift.opaque* noalias %0, %swift.opaque* noalias %1, %swift.type* %T) // CHECK-NOT: call // CHECK: ret void -sil hidden @f010_irgen_identity : $@convention(thin) (@in T) -> @out T { -bb0(%0 : $T): +sil hidden [ossa] @f010_irgen_identity : $@convention(thin) (@in T) -> @out T { +bb0(%0 : @owned $T): return %0 : $T } diff --git a/test/SIL/Parser/opaque_values_parse.sil b/test/SIL/Parser/opaque_values_parse.sil index 
750177c5696b1..2ca772047fdae 100644 --- a/test/SIL/Parser/opaque_values_parse.sil +++ b/test/SIL/Parser/opaque_values_parse.sil @@ -3,7 +3,7 @@ import Builtin import Swift -sil_stage canonical +sil_stage raw protocol Foo { func foo() diff --git a/test/SIL/Serialization/opaque_values_serialize.sil b/test/SIL/Serialization/opaque_values_serialize.sil index 376e5202f7b2b..976dd7683b82a 100644 --- a/test/SIL/Serialization/opaque_values_serialize.sil +++ b/test/SIL/Serialization/opaque_values_serialize.sil @@ -5,7 +5,7 @@ // RUN: %target-sil-opt %t/tmp.sib -enable-sil-opaque-values -verify -o %t/tmp.2.sib -module-name opaqueval // RUN: %target-sil-opt %t/tmp.2.sib -enable-sil-opaque-values -emit-sorted-sil -verify -module-name opaqueval | %FileCheck %s -sil_stage canonical +sil_stage raw import Builtin import Swift diff --git a/test/SIL/ownership-verifier/opaque_use_verifier.sil b/test/SIL/ownership-verifier/opaque_use_verifier.sil index 8475f38cd39dd..90c4131a3bced 100644 --- a/test/SIL/ownership-verifier/opaque_use_verifier.sil +++ b/test/SIL/ownership-verifier/opaque_use_verifier.sil @@ -5,7 +5,7 @@ // incorrectly. This is important to ensure that the verifier does not // regress. It should only deal with use matching of opaque types. -sil_stage canonical +sil_stage raw import Builtin diff --git a/test/SILOptimizer/address_lowering.sil b/test/SILOptimizer/address_lowering.sil index c4338e72b28eb..afcbbfb965fad 100644 --- a/test/SILOptimizer/address_lowering.sil +++ b/test/SILOptimizer/address_lowering.sil @@ -1,133 +1,175 @@ -// RUN: %target-sil-opt -address-lowering -enable-sil-opaque-values -emit-sorted-sil %s | %FileCheck %s +// RUN: %target-sil-opt -address-lowering -enable-sil-opaque-values -emit-sorted-sil -module-name Swift -sil-verify-all %s | %FileCheck %s +// +// The module name must be Swift so that declarations like Error are parsed as the correct loadable type. 
import Builtin -import Swift -sil_stage canonical -// CHECK: sil_stage lowered +sil_stage raw +typealias AnyObject = Builtin.AnyObject typealias Int = Builtin.Int64 +typealias Bool = Builtin.Int1 -// CHECK-LABEL: sil hidden @f010_addrlower_identity : $@convention(thin) (@in T) -> @out T { +public protocol C : AnyObject {} + +sil_default_witness_table C {} + +protocol P { + func foo() +} + +enum Optional { + case none + case some(T) +} + +protocol Error {} + +struct I {} + +struct SI { + var element: T + var index: I +} + +struct SRef { + var object: AnyObject + var element: T +} + +struct Pair { + var x : T + var y : T +} + +enum Mixed { + case i(Int) + case t(T) + case o(AnyObject) +}; + +precedencegroup ComparisonPrecedence { + assignment: true + associativity: right +} +infix operator <: ComparisonPrecedence +public protocol Comparable { + static func < (lhs: Self, rhs: Self) -> Bool +} + +sil [ossa] @takeGuaranteedObject : $@convention(thin) (@guaranteed AnyObject) -> () +sil [ossa] @takeIndirectClass : $@convention(thin) (@in_guaranteed C) -> () +sil [ossa] @takeTuple : $@convention(thin) <τ_0_0> (@in_guaranteed (τ_0_0, C)) -> () + +sil [ossa] @takeIn : $@convention(thin) (@in T) -> () +sil [ossa] @takeInGuaranteed : $@convention(thin) (@in_guaranteed T) -> () + +sil [ossa] @throwsError : $@convention(thin) (@in T) -> (@out T, @error Error) +sil [ossa] @returnInt : $@convention(thin) (@in T) -> (Int, @error Error) +sil [ossa] @returnIntOut : $@convention(thin) (@in T) -> (@out Int, @error Error) +sil [ossa] @returnTuple : $@convention(thin) (@in T) -> (@out T, Int, @out Int, @out T, @error Error) + +// CHECK-LABEL: sil [ossa] @f010_addrlower_identity : $@convention(thin) (@in T) -> @out T { // CHECK: bb0(%0 : $*T, %1 : $*T): // CHECK: copy_addr [take] %1 to [initialization] %0 : $*T // CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f010_addrlower_identity' -sil hidden @f010_addrlower_identity : $@convention(thin) (@in T) -> @out T { -bb0(%0 
: $T): +sil [ossa] @f010_addrlower_identity : $@convention(thin) (@in T) -> @out T { +bb0(%0 : @owned $T): return %0 : $T } - -sil hidden [noinline] @f020_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { -bb0(%0 : $T): +// CHECK-LABEL: sil [ossa] @f020_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { +// CHECK: %0 "$return_value" +// CHECK: %1 "$return_value" +// CHECK: %2 "$return_value" +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : $*T): +// CHECK: copy_addr %3 to [initialization] %1 : $*T +// CHECK: copy_addr %3 to [initialization] %2 : $*T +// CHECK: copy_addr [take] %3 to [initialization] %0 : $*T +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f020_multiResult' +sil [ossa] @f020_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { +bb0(%0 : @owned $T): + %1 = copy_value %0 : $T %2 = copy_value %0 : $T - %3 = copy_value %0 : $T - %4 = copy_value %0 : $T - destroy_value %0 : $T - %6 = tuple (%2 : $T, %3 : $T, %4 : $T) + %6 = tuple (%0 : $T, %1 : $T, %2 : $T) return %6 : $(T, T, T) } // Test returning an opaque tuple of tuples as a concrete tuple. // The multiResult call is specialized, but the SIL result convention does not change. 
// --- -// CHECK-LABEL: sil @f021_callMultiResult : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, Builtin.Int64, Builtin.Int64) { +// CHECK-LABEL: sil [ossa] @f021_callMultiResult : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, Builtin.Int64, Builtin.Int64) { // CHECK: bb0(%0 : $Builtin.Int64): // CHECK: %[[FN:.*]] = function_ref @f020_multiResult : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) // CHECK: %[[IN:.*]] = alloc_stack $Builtin.Int64 -// CHECK: store %0 to %[[IN]] : $*Builtin.Int64 +// CHECK: store %0 to [trivial] %[[IN]] : $*Builtin.Int64 // CHECK: %[[OUT1:.*]] = alloc_stack $Builtin.Int64 // CHECK: %[[OUT2:.*]] = alloc_stack $Builtin.Int64 // CHECK: %[[OUT3:.*]] = alloc_stack $Builtin.Int64 // CHECK: %{{.*}} = apply %[[FN]](%[[OUT1]], %[[OUT2]], %[[OUT3]], %[[IN]]) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) -// CHECK: %[[R3:.*]] = load %[[OUT3]] : $*Builtin.Int64 +// CHECK: %[[R3:.*]] = load [trivial] %[[OUT3]] : $*Builtin.Int64 // CHECK: dealloc_stack %[[OUT3]] : $*Builtin.Int64 -// CHECK: %[[R2:.*]] = load %[[OUT2]] : $*Builtin.Int64 +// CHECK: %[[R2:.*]] = load [trivial] %[[OUT2]] : $*Builtin.Int64 // CHECK: dealloc_stack %[[OUT2]] : $*Builtin.Int64 -// CHECK: %[[R1:.*]] = load %[[OUT1]] : $*Builtin.Int64 +// CHECK: %[[R1:.*]] = load [trivial] %[[OUT1]] : $*Builtin.Int64 // CHECK: dealloc_stack %[[OUT1]] : $*Builtin.Int64 // CHECK: dealloc_stack %[[IN]] : $*Builtin.Int64 // CHECK: %[[R:.*]] = tuple (%[[R1]] : $Builtin.Int64, %[[R2]] : $Builtin.Int64, %[[R3]] : $Builtin.Int64) // CHECK: return %[[R]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64) // CHECK-LABEL: } // end sil function 'f021_callMultiResult' -sil @f021_callMultiResult : $@convention(thin) (Int) -> (Int, Int, Int) { +sil [ossa] @f021_callMultiResult : $@convention(thin) (Int) -> (Int, Int, Int) { bb0(%0 : $Int): %1 = function_ref @f020_multiResult : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out 
τ_0_0, @out τ_0_0, @out τ_0_0) %2 = apply %1(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) - %3 = tuple_extract %2 : $(Int, Int, Int), 0 - %4 = tuple_extract %2 : $(Int, Int, Int), 1 - %5 = tuple_extract %2 : $(Int, Int, Int), 2 + (%3, %4, %5) = destructure_tuple %2 : $(Int, Int, Int) %6 = tuple (%3 : $Int, %4 : $Int, %5 : $Int) return %6 : $(Int, Int, Int) } -// CHECK-LABEL: sil @f030_returnPair : $@convention(thin) (@in T) -> (@out T, @out T) { +// CHECK-LABEL: sil [ossa] @f030_returnPair : $@convention(thin) (@in T) -> (@out T, @out T) { // CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T): -// CHECK: %[[LOCAL:.*]] = alloc_stack $T -// CHECK: copy_addr %2 to [initialization] %[[LOCAL]] : $*T -// CHECK: copy_addr [take] %[[LOCAL]] to [initialization] %0 : $*T -// CHECK: copy_addr [take] %2 to [initialization] %1 : $*T -// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[LOCAL]] : $*T -// CHECK: return %[[R]] : $() +// CHECK: copy_addr %2 to [initialization] %1 : $*T +// CHECK: copy_addr [take] %2 to [initialization] %0 : $*T +// CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f030_returnPair' -sil @f030_returnPair : $@convention(thin) (@in T) -> (@out T, @out T) { -bb0(%0 : $T): - %2 = copy_value %0 : $T - %3 = tuple (%2 : $T, %0 : $T) +sil [ossa] @f030_returnPair : $@convention(thin) (@in T) -> (@out T, @out T) { +bb0(%0 : @owned $T): + %1 = copy_value %0 : $T + %3 = tuple (%0 : $T, %1 : $T) return %3 : $(T, T) } -// CHECK-LABEL: sil @f031_unusedIndirect : $@convention(thin) (@in T) -> @out T { +// CHECK-LABEL: sil [ossa] @f031_unusedIndirect : $@convention(thin) (@in T) -> @out T { // CHECK: bb0(%0 : $*T, %1 : $*T): // CHECK: %[[LOC0:.*]] = alloc_stack $T -// CHECK: %[[OUT1:.*]] = alloc_stack $T -// CHECK: %[[LOC1:.*]] = alloc_stack $T -// CHECK: %[[OUT2:.*]] = alloc_stack $T -// CHECK: %[[LOC2:.*]] = alloc_stack $T -// CHECK: // function_ref f030_returnPair // CHECK: %[[F:.*]] = function_ref 
@f030_returnPair : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) -// CHECK: copy_addr %1 to [initialization] %[[LOC0]] : $*T -// CHECK: %[[R0:.*]] = apply %[[F]](%[[OUT1]], %[[OUT2]], %[[LOC0]]) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) -// CHECK: copy_addr %[[OUT1]] to [initialization] %[[LOC1]] : $*T -// CHECK: copy_addr %[[OUT2]] to [initialization] %[[LOC2]] : $*T -// CHECK: destroy_addr %[[OUT1]] : $*T -// CHECK: destroy_addr %[[OUT2]] : $*T -// CHECK: destroy_addr %[[LOC1]] : $*T -// CHECK: destroy_addr %1 : $*T -// CHECK: copy_addr [take] %[[LOC2]] to [initialization] %0 : $*T -// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[LOC2]] : $*T -// CHECK: dealloc_stack %[[OUT2]] : $*T -// CHECK: dealloc_stack %[[LOC1]] : $*T -// CHECK: dealloc_stack %[[OUT1]] : $*T +// CHECK: %[[R0:.*]] = apply %[[F]](%[[LOC0]], %0, %1) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) +// CHECK: destroy_addr %[[LOC0]] : $*T // CHECK: dealloc_stack %[[LOC0]] : $*T -// CHECK: return %[[R]] : $() +// CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f031_unusedIndirect' -sil @f031_unusedIndirect : $@convention(thin) (@in T) -> @out T { -bb0(%0 : $T): +sil [ossa] @f031_unusedIndirect : $@convention(thin) (@in T) -> @out T { +bb0(%0 : @owned $T): %2 = function_ref @f030_returnPair : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) - %3 = copy_value %0 : $T - %4 = apply %2(%3) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) - %5 = tuple_extract %4 : $(T, T), 0 - %6 = copy_value %5 : $T - %7 = tuple_extract %4 : $(T, T), 1 - %8 = copy_value %7 : $T - destroy_value %4 : $(T, T) - destroy_value %6 : $T - destroy_value %0 : $T - return %8 : $T + %4 = apply %2(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) + (%5, %6) = destructure_tuple %4 : $(T, T) + destroy_value %5 : $T + return %6 : $T } -sil hidden @f040_consumeArg : 
$@convention(thin) (@in T) -> () { -bb0(%0 : $T): +// CHECK-LABEL: sil [ossa] @f040_consumeArg : $@convention(thin) (@in T) -> () { +// CHECK: destroy_addr %0 : $*T +// CHECK-LABEL: } // end sil function 'f040_consumeArg' +sil [ossa] @f040_consumeArg : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): destroy_value %0 : $T %3 = tuple () return %3 : $() } -// CHECK-LABEL: sil @f041_opaqueArg : $@convention(thin) (@in T) -> () { +// CHECK-LABEL: sil [ossa] @f041_opaqueArg : $@convention(thin) (@in T) -> () { // CHECK: bb0(%0 : $*T): // CHECK: %[[LOC:.*]] = alloc_stack $T // CHECK: %[[FN:.*]] = function_ref @f040_consumeArg : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () @@ -138,8 +180,8 @@ bb0(%0 : $T): // CHECK: dealloc_stack %[[LOC]] : $*T // CHECK: return %[[R]] : $() // CHECK-LABEL: } // end sil function 'f041_opaqueArg' -sil @f041_opaqueArg : $@convention(thin) (@in T) -> () { -bb0(%0 : $T): +sil [ossa] @f041_opaqueArg : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): %2 = function_ref @f040_consumeArg : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () %3 = copy_value %0 : $T %4 = apply %2(%3) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () @@ -148,83 +190,120 @@ bb0(%0 : $T): return %6 : $() } -// CHECK-LABEL: sil @f050_storeinout : $@convention(thin) (@inout T, @inout T, @in T) -> () { +// FIXME: Optimize transfers from indirect function args to indirect call args. +// Verify that a subsequent pass remove the temp allocation. 
+// +// CHECK-LABEL: sil [ossa] @f043_indirectGuaranteedArg : $@convention(thin) (@in C) -> () { +// CHECK: bb0(%0 : $*C): +// CHECK: [[LD:%.*]] = load [take] %0 : $*C +// CHECK: [[TMP:%.*]] = alloc_stack $C +// CHECK: [[B:%.*]] = begin_borrow [[LD]] : $C +// CHECK: [[SB:%.*]] = store_borrow [[B]] to [[TMP]] : $*C +// CHECK: apply %{{.*}}([[TMP]]) : $@convention(thin) (@in_guaranteed C) -> () +// CHECK: end_borrow [[B]] : $C +// CHECK: dealloc_stack [[TMP]] : $*C +// CHECK: destroy_value [[LD]] : $C +// CHECK-LABEL: } // end sil function 'f043_indirectGuaranteedArg' +sil [ossa] @f043_indirectGuaranteedArg : $@convention(thin) (@in C) -> () { +bb0(%0 : @owned $C): + %1 = function_ref @takeIndirectClass : $@convention(thin) (@in_guaranteed C) -> () + %2 = apply %1(%0) : $@convention(thin) (@in_guaranteed C) -> () + destroy_value %0 : $C + %6 = tuple () + return %6 : $() +} + +sil [ossa] @f044_indirectGuaranteed : $@convention(thin) (@in_guaranteed T) -> () + +// CHECK-LABEL: sil [ossa] @f045_indirectGuaranteedArg : $@convention(thin) (@in T) -> () { +// CHECK: bb0(%0 : $*T): +// CHECK: apply %{{.*}}(%0) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> () +// CHECK: destroy_addr %0 : $*T +// CHECK-LABEL: } // end sil function 'f045_indirectGuaranteedArg' +sil [ossa] @f045_indirectGuaranteedArg : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): + %1 = function_ref @f044_indirectGuaranteed : $@convention(thin) <τ_0_0>(@in_guaranteed τ_0_0) -> () + %2 = apply %1(%0) : $@convention(thin) <τ_0_0>(@in_guaranteed τ_0_0) -> () + destroy_value %0 : $T + %6 = tuple () + return %6 : $() +} + +// CHECK-LABEL: sil [ossa] @f050_storeinout : $@convention(thin) (@inout T, @inout T, @in T) -> () { // CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T): -// CHECK: %[[ARG1:.*]] = alloc_stack $T // CHECK: %[[PREV1:.*]] = alloc_stack $T -// CHECK: %[[ARG2:.*]] = alloc_stack $T // CHECK: %[[PREV2:.*]] = alloc_stack $T -// CHECK: debug_value %0 : $*T, var, name "t", argno 1, expr 
op_deref -// CHECK: debug_value %1 : $*T, var, name "u", argno 2, expr op_deref -// CHECK: debug_value %2 : $*T, {{.*}} expr op_deref -// CHECK: copy_addr %2 to [initialization] %[[ARG1]] : $*T +// CHECK: debug_value %0 : $*T, var, name "t", argno 1 +// CHECK: debug_value %1 : $*T, var, name "u", argno 2 +// CHECK: debug_value %2 : $*T // CHECK: copy_addr [take] %0 to [initialization] %[[PREV1]] : $*T -// CHECK: copy_addr [take] %[[ARG1]] to [initialization] %0 : $*T +// CHECK: copy_addr %2 to [initialization] %0 : $*T // CHECK: destroy_addr %[[PREV1]] : $*T -// CHECK: copy_addr %2 to [initialization] %[[ARG2]] : $*T -// CHECK: copy_addr [take] %1 to [initialization] %[[PREV2]] : $*T -// CHECK: copy_addr [take] %[[ARG2]] to [initialization] %1 : $*T +// CHECK: copy_addr %1 to [initialization] %[[PREV2]] : $*T +// CHECK: copy_addr %2 to %1 : $*T // CHECK: destroy_addr %[[PREV2]] : $*T // CHECK: destroy_addr %2 : $*T -// CHECK: %[[R:.*]] = tuple () // CHECK: dealloc_stack %[[PREV2]] : $*T -// CHECK: dealloc_stack %[[ARG2]] : $*T // CHECK: dealloc_stack %[[PREV1]] : $*T -// CHECK: dealloc_stack %[[ARG1]] : $*T -// CHECK: return %[[R]] : $() +// CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f050_storeinout' -sil @f050_storeinout : $@convention(thin) (@inout T, @inout T, @in T) -> () { -bb0(%0 : $*T, %1 : $*T, %2 : $T): +sil [ossa] @f050_storeinout : $@convention(thin) (@inout T, @inout T, @in T) -> () { +bb0(%0 : $*T, %1 : $*T, %2 : @owned $T): debug_value %0 : $*T, var, name "t", argno 1, expr op_deref debug_value %1 : $*T, var, name "u", argno 2, expr op_deref debug_value %2 : $T, let, name "x", argno 3 + %7 = load [take] %0 : $*T %6 = copy_value %2 : $T - %7 = load %0 : $*T - store %6 to %0 : $*T + store %6 to [init] %0 : $*T destroy_value %7 : $T + %11 = load [copy] %1 : $*T %10 = copy_value %2 : $T - %11 = load %1 : $*T - store %10 to %1 : $*T + store %10 to [assign] %1 : $*T destroy_value %11 : $T destroy_value %2 : $T %15 = tuple () return 
%15 : $() } -sil hidden @f060_mutate : $@convention(thin) (@inout T, @in T) -> () { -bb0(%0 : $*T, %1 : $T): +// CHECK-LABEL: sil [ossa] @f060_mutate : $@convention(thin) (@inout T, @in T) -> () { +// CHECK: bb0(%0 : $*T, %1 : $*T): +// CHECK: [[A0:%.*]] = alloc_stack $T +// CHECK: copy_addr [take] %0 to [initialization] [[A0]] : $*T +// CHECK: copy_addr %1 to [initialization] %0 : $*T +// CHECK: destroy_addr [[A0]] : $*T +// CHECK: destroy_addr %1 : $*T +// CHECK: dealloc_stack [[A0]] : $*T +// CHECK-LABEL: } // end sil function 'f060_mutate' +sil [ossa] @f060_mutate : $@convention(thin) (@inout T, @in T) -> () { +bb0(%0 : $*T, %1 : @owned $T): %4 = copy_value %1 : $T - %5 = load %0 : $*T - store %4 to %0 : $*T + %5 = load [take] %0 : $*T + store %4 to [init] %0 : $*T destroy_value %5 : $T destroy_value %1 : $T %9 = tuple () return %9 : $() } -// CHECK-LABEL: sil @f061_callinout : $@convention(thin) (@in T) -> () { +// CHECK-LABEL: sil [ossa] @f061_callinout : $@convention(thin) (@in T) -> () { // CHECK: bb0(%0 : $*T): // CHECK: %[[LOC1:.*]] = alloc_stack $T -// CHECK: %[[LOC2:.*]] = alloc_stack $T // CHECK: %[[INOUT:.*]] = alloc_stack $T, var, name "u" -// CHECK: copy_addr %0 to [initialization] %[[LOC1]] : $*T -// CHECK: copy_addr [take] %[[LOC1]] to [initialization] %[[INOUT]] : $*T +// CHECK: copy_addr %0 to [initialization] %[[INOUT]] : $*T // CHECK: %[[FN:.*]] = function_ref @f060_mutate : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () -// CHECK: copy_addr %0 to [initialization] %[[LOC2]] : $*T -// CHECK: %{{.*}} = apply %[[FN]](%[[INOUT]], %[[LOC2]]) : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () +// CHECK: copy_addr %0 to [initialization] %[[LOC1]] : $*T +// CHECK: %{{.*}} = apply %[[FN]](%[[INOUT]], %[[LOC1]]) : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () // CHECK: destroy_addr %[[INOUT]] : $*T // CHECK: destroy_addr %0 : $*T -// CHECK: %[[R:.*]] = tuple () // CHECK: dealloc_stack %[[INOUT]] : $*T -// CHECK: 
dealloc_stack %[[LOC2]] : $*T // CHECK: dealloc_stack %[[LOC1]] : $*T -// CHECK: return %[[R]] : $() +// CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f061_callinout' -sil @f061_callinout : $@convention(thin) (@in T) -> () { -bb0(%0 : $T): +sil [ossa] @f061_callinout : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): %1 = alloc_stack $T, var, name "u" %3 = copy_value %0 : $T - store %3 to %1 : $*T + store %3 to [init] %1 : $*T %5 = function_ref @f060_mutate : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () %6 = copy_value %0 : $T %7 = apply %5(%1, %6) : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () @@ -235,242 +314,233 @@ bb0(%0 : $T): return %10 : $() } -public protocol C : class {} - -// CHECK-LABEL: sil @f070_mixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $C): +// CHECK-LABEL: sil [ossa] @f070_mixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : @owned $C): // CHECK: copy_addr [take] %1 to [initialization] %0 : $*T // CHECK: return %2 : $C // CHECK-LABEL: } // end sil function 'f070_mixedResult1' -sil @f070_mixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -bb0(%0 : $T, %1 : $C): +sil [ossa] @f070_mixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { +bb0(%0 : @owned $T, %1 : @owned $C): %4 = tuple (%0 : $T, %1 : $C) return %4 : $(T, C) } -// CHECK-LABEL: sil @f071_mixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : $C): -// CHECK: %[[L:.*]] = alloc_stack $T -// CHECK: copy_addr %2 to [initialization] %[[L]] : $*T -// CHECK: strong_retain %3 : $C -// CHECK: copy_addr [take] %[[L]] to [initialization] %0 : $*T +// CHECK-LABEL: sil [ossa] @f071_mixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { +// 
CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : @owned $C): +// CHECK: copy_addr %2 to [initialization] %0 : $*T +// CHECK: [[C:%.*]] = copy_value %3 : $C // CHECK: copy_addr [take] %2 to [initialization] %1 : $*T -// CHECK: %[[T:.*]] = tuple (%3 : $C, %3 : $C) -// CHECK: dealloc_stack %[[L]] : $*T -// CHECK: return %[[T]] : $(C, C) +// CHECK: [[T:%.*]] = tuple ([[C]] : $C, %3 : $C) +// CHECK: return [[T]] : $(C, C) // CHECK-LABEL: } // end sil function 'f071_mixedResult2' -sil @f071_mixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -bb0(%0 : $T, %1 : $C): +sil [ossa] @f071_mixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { +bb0(%0 : @owned $T, %1 : @owned $C): %4 = copy_value %0 : $T - strong_retain %1 : $C - %6 = tuple (%4 : $T, %0 : $T, %1 : $C, %1 : $C) + %5 = copy_value %1 : $C + %6 = tuple (%4 : $T, %0 : $T, %5 : $C, %1 : $C) return %6 : $(T, T, C, C) } -// CHECK-LABEL: sil @f072_callMixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $C): -// CHECK: %[[LIN:.*]] = alloc_stack $T -// CHECK: %[[OUT:.*]] = alloc_stack $T -// CHECK: %[[LOUT:.*]] = alloc_stack $T +// CHECK-LABEL: sil [ossa] @f072_callMixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : @owned $C): +// CHECK: [[IN:%.*]] = alloc_stack $T // CHECK: // function_ref f070_mixedResult1 -// CHECK: %[[F:.*]] = function_ref @f070_mixedResult1 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) -// CHECK: copy_addr %1 to [initialization] %[[LIN]] : $*T -// CHECK: strong_retain %2 : $C -// CHECK: %[[R:.*]] = apply %[[F]](%[[OUT]], %[[LIN]], %2) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) -// CHECK: copy_addr %[[OUT]] to [initialization] %[[LOUT]] : $*T -// CHECK: strong_retain %[[R]] : $C -// CHECK: destroy_addr %[[OUT]] : $*T -// CHECK: 
strong_release %[[R]] : $C -// CHECK: strong_release %2 : $C +// CHECK: [[F:%.*]] = function_ref @f070_mixedResult1 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) +// CHECK: copy_addr %1 to [initialization] [[IN]] : $*T +// CHECK: [[C:%.*]] = copy_value %2 : $C +// CHECK: [[R:%.*]] = apply [[F]](%0, [[IN]], [[C]]) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) +// CHECK: destroy_value %2 : $C // CHECK: destroy_addr %1 : $*T -// CHECK: copy_addr [take] %[[LOUT]] to [initialization] %0 : $*T -// CHECK: dealloc_stack %[[LOUT]] : $*T -// CHECK: dealloc_stack %[[OUT]] : $*T -// CHECK: dealloc_stack %[[LIN]] : $*T -// CHECK: return %[[R]] : $C +// CHECK: dealloc_stack [[IN]] : $*T +// CHECK: return [[R]] : $C // CHECK-LABEL: } // end sil function 'f072_callMixedResult1' -sil @f072_callMixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -bb0(%0 : $T, %1 : $C): +sil [ossa] @f072_callMixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { +bb0(%0 : @owned $T, %1 : @owned $C): %4 = function_ref @f070_mixedResult1 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) %5 = copy_value %0 : $T - strong_retain %1 : $C - %7 = apply %4(%5, %1) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) - %8 = tuple_extract %7 : $(T, C), 0 - %9 = copy_value %8 : $T - %10 = tuple_extract %7 : $(T, C), 1 - strong_retain %10 : $C - destroy_value %7 : $(T, C) - strong_release %1 : $C + %6 = copy_value %1 : $C + %7 = apply %4(%5, %6) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) + (%8, %9) = destructure_tuple %7 : $(T, C) + destroy_value %1 : $C destroy_value %0 : $T - %15 = tuple (%9 : $T, %10 : $C) + %15 = tuple (%8 : $T, %9 : $C) return %15 : $(T, C) } -// CHECK-LABEL: sil @f073_callMixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 
: $*T, %3 : $C): -// CHECK: %[[LOC0:.*]] = alloc_stack $T -// CHECK: %[[OUT1:.*]] = alloc_stack $T -// CHECK: %[[LOC1:.*]] = alloc_stack $T -// CHECK: %[[OUT2:.*]] = alloc_stack $T -// CHECK: %[[LOC2:.*]] = alloc_stack $T -// CHECK: %[[F:.*]] = function_ref @f071_mixedResult2 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) -// CHECK: copy_addr %2 to [initialization] %[[LOC0]] : $*T -// CHECK: strong_retain %3 : $C -// CHECK: %[[R:.*]] = apply %[[F]](%[[OUT1]], %[[OUT2]], %[[LOC0]], %3) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) -// CHECK: %[[T2:.*]] = tuple_extract %[[R]] : $(C, C), 1 -// CHECK: %[[T1:.*]] = tuple_extract %[[R]] : $(C, C), 0 -// CHECK: copy_addr %[[OUT1]] to [initialization] %[[LOC1]] : $*T -// CHECK: copy_addr %[[OUT2]] to [initialization] %[[LOC2]] : $*T -// CHECK: strong_retain %[[T1]] : $C -// CHECK: strong_retain %[[T2]] : $C -// CHECK: destroy_addr %[[OUT1]] : $*T -// CHECK: destroy_addr %[[OUT2]] : $*T -// CHECK: strong_release %[[T1]] : $C -// CHECK: strong_release %[[T2]] : $C -// CHECK: strong_release %3 : $C +// CHECK-LABEL: sil [ossa] @f073_callMixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : @owned $C): +// CHECK: [[IN:%.*]] = alloc_stack $T +// CHECK: [[F:%.*]] = function_ref @f071_mixedResult2 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) +// CHECK: copy_addr %2 to [initialization] [[IN]] : $*T +// CHECK: [[C:%.*]] = copy_value %3 : $C +// CHECK: [[T:%.*]] = apply [[F]](%0, %1, [[IN]], [[C]]) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) +// CHECK: ([[OUT0:%.*]], [[OUT1:%.*]]) = destructure_tuple [[T]] : $(C, C) +// CHECK: destroy_value %3 : $C // CHECK: destroy_addr %2 : $*T -// CHECK: copy_addr [take] %[[LOC1]] to 
[initialization] %0 : $*T -// CHECK: copy_addr [take] %[[LOC2]] to [initialization] %1 : $*T -// CHECK: %[[T:.*]] = tuple (%[[T1]] : $C, %[[T2]] : $C) -// CHECK: dealloc_stack %[[LOC2]] : $*T -// CHECK: dealloc_stack %[[OUT2]] : $*T -// CHECK: dealloc_stack %[[LOC1]] : $*T -// CHECK: dealloc_stack %[[OUT1]] : $*T -// CHECK: dealloc_stack %[[LOC0]] : $*T -// CHECK: return %[[T]] : $(C, C) +// CHECK: [[R:%.*]] = tuple ([[OUT0]] : $C, [[OUT1]] : $C) +// CHECK: dealloc_stack [[IN]] : $*T +// CHECK: return [[R]] : $(C, C) // CHECK-LABEL: } // end sil function 'f073_callMixedResult2' -sil @f073_callMixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -bb0(%0 : $T, %1 : $C): +sil [ossa] @f073_callMixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { +bb0(%0 : @owned $T, %1 : @owned $C): %4 = function_ref @f071_mixedResult2 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) %5 = copy_value %0 : $T - strong_retain %1 : $C - %7 = apply %4(%5, %1) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) - %8 = tuple_extract %7 : $(T, T, C, C), 0 - %9 = copy_value %8 : $T - %10 = tuple_extract %7 : $(T, T, C, C), 1 - %11 = copy_value %10 : $T - %12 = tuple_extract %7 : $(T, T, C, C), 2 - strong_retain %12 : $C - %14 = tuple_extract %7 : $(T, T, C, C), 3 - strong_retain %14 : $C - destroy_value %7 : $(T, T, C, C) - strong_release %1 : $C + %6 = copy_value %1 : $C + %7 = apply %4(%5, %6) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) + (%8, %9, %10, %11) = destructure_tuple %7 : $(T, T, C, C) + destroy_value %1 : $C destroy_value %0 : $T - %19 = tuple (%9 : $T, %11 : $T, %12 : $C, %14 : $C) + %19 = tuple (%8 : $T, %9 : $T, %10 : $C, %11 : $C) return %19 : $(T, T, C, C) } -sil_default_witness_table C {} +sil [ossa] @returnMixedResult3 : $@convention(thin) () -> 
(@out T, @out C, @owned C) -enum Optional { - case none - case some(T) +// Example directly from the comments in +// ApplyRewriter::convertApplyWithIndirectResults() +// +// FIXME: verify that a subsequent pass remove the temp allocation. +// +// CHECK-LABEL: sil [ossa] @f074_callMixedResult3 : $@convention(thin) () -> (@out T, @out C, @owned C) { +// CHECK: bb0(%0 : $*T, %1 : $*C): +// CHECK: [[OUT1:%.*]] = alloc_stack $C +// CHECK: [[OUT2:%.*]] = apply %{{.*}}(%0, [[OUT1]]) : $@convention(thin) <τ_0_0> () -> (@out τ_0_0, @out C, @owned C) +// CHECK: [[LD:%.*]] = load [take] [[OUT1]] : $*C +// CHECK: dealloc_stack [[OUT1]] : $*C +// CHECK: store [[LD]] to [init] %1 : $*C +// CHECK: return [[OUT2]] : $C +// CHECK-LABEL: } // end sil function 'f074_callMixedResult3' +sil [ossa] @f074_callMixedResult3 : $@convention(thin) () -> (@out T, @out C, @owned C) { +bb0: + %0 = function_ref @returnMixedResult3 : $@convention(thin) () -> (@out T, @out C, @owned C) + %1 = apply %0() : $@convention(thin) () -> (@out T, @out C, @owned C) + (%2, %3, %4) = destructure_tuple %1 : $(T, C, C) + %5 = tuple (%2 : $T, %3 : $C, %4 : $C) + return %5 : $(T, C, C) +} + +// CHECK-LABEL: sil [ossa] @f075_reusedResult : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : @owned $C): +// CHECK: [[TUPLE:%.*]] = alloc_stack $(T, C) +// CHECK: [[E1:%.*]] = tuple_element_addr [[TUPLE]] : $*(T, C), 0 +// CHECK: copy_addr [take] %1 to [initialization] [[E1]] : $*T +// CHECK: [[E2:%.*]] = tuple_element_addr [[TUPLE]] : $*(T, C), 1 +// CHECK: store %2 to [init] [[E2]] : $*C +// CHECK: apply %{{.*}}([[TUPLE]]) : $@convention(thin) <τ_0_0> (@in_guaranteed (τ_0_0, C)) -> () +// CHECK: [[E1:%.*]] = tuple_element_addr [[TUPLE]] : $*(T, C), 0 +// CHECK: [[E2:%.*]] = tuple_element_addr [[TUPLE]] : $*(T, C), 1 +// CHECK: [[LD:%.*]] = load [take] [[E2]] : $*C +// CHECK: copy_addr [take] [[E1]] to [initialization] %0 : $*T +// CHECK: dealloc_stack [[TUPLE]] : $*(T, 
C) +// CHECK: return [[LD]] : $C +// CHECK-LABEL: } // end sil function 'f075_reusedResult' +sil [ossa] @f075_reusedResult : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { +bb0(%0 : @owned $T, %1 : @owned $C): + %2 = tuple (%0 : $T, %1 : $C) + %f = function_ref @takeTuple : $@convention(thin) <τ_0_0> (@in_guaranteed (τ_0_0, C)) -> () + %c = apply %f(%2) : $@convention(thin) <τ_0_0> (@in_guaranteed (τ_0_0, C)) -> () + return %2 : $(T, C) } -// CHECK-LABEL: sil @f080_optional : $@convention(thin) (@in T) -> @out Optional { +// CHECK-LABEL: sil [ossa] @f080_optional : $@convention(thin) (@in T) -> @out Optional { // CHECK: bb0(%0 : $*Optional, %1 : $*T): -// CHECK: %[[L1:.*]] = alloc_stack $T -// CHECK: %[[L2:.*]] = alloc_stack $Optional -// CHECK: copy_addr %1 to [initialization] %[[L1]] : $*T -// CHECK: %[[DATA:.*]] = init_enum_data_addr %[[L2]] : $*Optional, #Optional.some!enumelt -// CHECK: copy_addr [take] %[[L1]] to [initialization] %[[DATA]] : $*T -// CHECK: inject_enum_addr %[[L2]] : $*Optional, #Optional.some!enumelt +// CHECK: [[DATA:%.*]] = init_enum_data_addr %0 : $*Optional, #Optional.some!enumelt +// CHECK: copy_addr %1 to [initialization] [[DATA]] : $*T +// CHECK: inject_enum_addr %0 : $*Optional, #Optional.some!enumelt // CHECK: destroy_addr %1 : $*T -// CHECK: copy_addr [take] %[[L2]] to [initialization] %0 : $*Optional -// CHECK: %[[T:.*]] = tuple () -// CHECK: dealloc_stack %[[L2]] : $*Optional -// CHECK: dealloc_stack %[[L1]] : $*T -// CHECK: return %[[T]] : $() +// CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f080_optional' -sil @f080_optional : $@convention(thin) (@in T) -> @out Optional { -bb0(%0 : $T): +sil [ossa] @f080_optional : $@convention(thin) (@in T) -> @out Optional { +bb0(%0 : @owned $T): %cpy = copy_value %0 : $T %opt = enum $Optional, #Optional.some!enumelt, %cpy : $T destroy_value %0 : $T return %opt : $Optional } -// CHECK-LABEL: sil @f090_tupletuple : $@convention(thin) ((Builtin.Int64, 
Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) { +// CHECK-LABEL: sil [ossa] @f081_unwrap : $@convention(thin) (@in Optional) -> () { +// CHECK: bb0(%0 : $*Optional): +// CHECK: [[A:%.*]] = unchecked_take_enum_data_addr %0 : $*Optional, #Optional.some!enumelt +// CHECK: apply %{{.*}}([[A]]) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () +// CHECK-LABEL: } // end sil function 'f081_unwrap' +sil [ossa] @f081_unwrap : $@convention(thin) (@in Optional) -> () { +bb0(%0 : @owned $Optional): + %d = unchecked_enum_data %0 : $Optional, #Optional.some!enumelt + %f = function_ref @takeIn : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () + %call = apply %f(%d) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () + %4 = tuple () + return %4 : $() +} + +// CHECK-LABEL: sil [ossa] @f090_tupletuple : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) { // CHECK: bb0(%0 : $*(Builtin.Int64, Builtin.Int64), %1 : $*(Builtin.Int64, Builtin.Int64), %2 : $(Builtin.Int64, Builtin.Int64), %3 : $Builtin.Int64): -// CHECK: store %2 to %0 : $*(Builtin.Int64, Builtin.Int64) -// CHECK: store %2 to %1 : $*(Builtin.Int64, Builtin.Int64) +// CHECK: store %2 to [trivial] %0 : $*(Builtin.Int64, Builtin.Int64) +// CHECK: store %2 to [trivial] %1 : $*(Builtin.Int64, Builtin.Int64) // CHECK: %[[T:.*]] = tuple (%3 : $Builtin.Int64, %3 : $Builtin.Int64) // CHECK: return %[[T]] : $(Builtin.Int64, Builtin.Int64) // CHECK-LABEL: } // end sil function 'f090_tupletuple' -sil @f090_tupletuple : $@convention(thin) ((Int, Int), Int) -> (@out (Int, Int), @out (Int, Int), Int, Int) { +sil [ossa] @f090_tupletuple : $@convention(thin) ((Int, Int), Int) -> (@out (Int, Int), @out (Int, Int), Int, Int) { bb0(%0 : $(Int, Int), %1 : $Int): %2 = tuple (%0 : $(Int, Int), %0 : $(Int, Int), %1 : $Int, %1 : $Int) return %2 
: $((Int, Int), (Int, Int), Int, Int) } -// CHECK-LABEL: sil @f091_callTuple : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64) { +// CHECK-LABEL: sil [ossa] @f091_callTuple : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64) { // CHECK: bb0(%0 : $Builtin.Int64): -// CHECK: %[[T1:.*]] = tuple (%0 : $Builtin.Int64, %0 : $Builtin.Int64) -// CHECK: %[[F:.*]] = function_ref @f090_tupletuple : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) -// CHECK: %[[O1:.*]] = alloc_stack $(Builtin.Int64, Builtin.Int64) -// CHECK: %[[O2:.*]] = alloc_stack $(Builtin.Int64, Builtin.Int64) -// CHECK: %[[RT:.*]] = apply %[[F]](%[[O1]], %4, %1, %0) : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) -// CHECK: %[[R1:.*]] = tuple_extract %[[RT]] : $(Builtin.Int64, Builtin.Int64), 1 -// CHECK: %[[R0:.*]] = tuple_extract %[[RT]] : $(Builtin.Int64, Builtin.Int64), 0 -// CHECK: %[[L2:.*]] = load %[[O2]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: dealloc_stack %[[O2]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: %[[L1:.*]] = load %[[O1]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: dealloc_stack %[[O1]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: %[[E10:.*]] = tuple_extract %[[L1]] : $(Builtin.Int64, Builtin.Int64), 0 -// CHECK: %[[E11:.*]] = tuple_extract %[[L1]] : $(Builtin.Int64, Builtin.Int64), 1 -// CHECK: %[[E20:.*]] = tuple_extract %[[L2]] : $(Builtin.Int64, Builtin.Int64), 0 -// CHECK: %[[E21:.*]] = tuple_extract %[[L2]] : $(Builtin.Int64, Builtin.Int64), 1 -// CHECK: %[[RET:.*]] = tuple (%[[E10]] : $Builtin.Int64, %[[E11]] : $Builtin.Int64, %[[E20]] : $Builtin.Int64, 
%[[E21]] : $Builtin.Int64, %[[R0]] : $Builtin.Int64, %[[R1]] : $Builtin.Int64) -// CHECK: return %[[RET]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64) +// CHECK: [[T1:%.*]] = tuple (%0 : $Builtin.Int64, %0 : $Builtin.Int64) +// CHECK: [[F:%.*]] = function_ref @f090_tupletuple : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) +// CHECK: [[O1:%.*]] = alloc_stack $(Builtin.Int64, Builtin.Int64) +// CHECK: [[O2:%.*]] = alloc_stack $(Builtin.Int64, Builtin.Int64) +// CHECK: [[R:%.*]] = apply [[F]]([[O1]], [[O2]], %1, %0) : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) +// CHECK: [[L2:%.*]] = load [trivial] [[O2]] : $*(Builtin.Int64, Builtin.Int64) +// CHECK: dealloc_stack [[O2]] : $*(Builtin.Int64, Builtin.Int64) +// CHECK: [[L1:%.*]] = load [trivial] [[O1]] : $*(Builtin.Int64, Builtin.Int64) +// CHECK: dealloc_stack [[O1]] : $*(Builtin.Int64, Builtin.Int64) +// CHECK: ([[R4:%.*]], [[R5:%.*]]) = destructure_tuple [[R]] : $(Builtin.Int64, Builtin.Int64) +// CHECK: ([[R0:%.*]], [[R1:%.*]]) = destructure_tuple [[L1]] : $(Builtin.Int64, Builtin.Int64) +// CHECK: ([[R2:%.*]], [[R3:%.*]]) = destructure_tuple [[L2]] : $(Builtin.Int64, Builtin.Int64) +// CHECK: [[RET:%.*]] = tuple ([[R0]] : $Builtin.Int64, [[R1]] : $Builtin.Int64, [[R2]] : $Builtin.Int64, [[R3]] : $Builtin.Int64, [[R4]] : $Builtin.Int64, [[R5]] : $Builtin.Int64) +// CHECK: return [[RET]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64) // CHECK-LABEL: } // end sil function 'f091_callTuple' -sil @f091_callTuple : $@convention(thin) (Int) -> (Int, Int, Int, Int, Int, Int) { +sil [ossa] @f091_callTuple : $@convention(thin) (Int) -> (Int, Int, Int, Int, Int, Int) { 
bb0(%0: $Int): %1 = tuple (%0 : $Int, %0 : $Int) %2 = function_ref @f090_tupletuple : $@convention(thin) ((Int, Int), Int) -> (@out (Int, Int), @out (Int, Int), Int, Int) %3 = apply %2(%1, %0) : $@convention(thin) ((Int, Int), Int) -> (@out (Int, Int), @out (Int, Int), Int, Int) - %9 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 0 - %10 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 1 - %11 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 2 - %12 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 3 - %13 = tuple_extract %9 : $(Int, Int), 0 - %14 = tuple_extract %9 : $(Int, Int), 1 - %15 = tuple_extract %10 : $(Int, Int), 0 - %16 = tuple_extract %10 : $(Int, Int), 1 - %17 = tuple (%13 : $Int, %14 : $Int, %15 : $Int, %16 : $Int, %11 : $Int, %12 : $Int) - return %17 : $(Int, Int, Int, Int, Int, Int) -} - -// CHECK-LABEL: sil hidden @f100_any : $@convention(thin) (@in Any) -> () { + (%4, %5, %6, %7) = destructure_tuple %3 : $((Int, Int), (Int, Int), Int, Int) + (%8, %9) = destructure_tuple %4 : $(Int, Int) + (%10, %11) = destructure_tuple %5 : $(Int, Int) + %12 = tuple (%8 : $Int, %9 : $Int, %10 : $Int, %11 : $Int, %6 : $Int, %7 : $Int) + return %12 : $(Int, Int, Int, Int, Int, Int) +} + +// CHECK-LABEL: sil [ossa] @f100_any : $@convention(thin) (@in Any) -> () { // CHECK: bb0(%0 : $*Any): // CHECK: destroy_addr %0 : $*Any // CHECK: %[[T:.*]] = tuple () // CHECK: return %[[T]] : $() // CHECK-LABEL: } // end sil function 'f100_any' -sil hidden @f100_any : $@convention(thin) (@in Any) -> () { -bb0(%0 : $Any): +sil [ossa] @f100_any : $@convention(thin) (@in Any) -> () { +bb0(%0 : @owned $Any): debug_value %0 : $Any, let, name "any", argno 1 destroy_value %0 : $Any %3 = tuple () return %3 : $() } -// CHECK-LABEL: sil @f101_passAny : $@convention(thin) (@in T) -> () { +// CHECK-LABEL: sil [ossa] @f101_passAny : $@convention(thin) (@in T) -> () { // CHECK: bb0(%0 : $*T): -// CHECK: %[[T1:.*]] = alloc_stack $T // CHECK: %[[A:.*]] = 
alloc_stack $Any // CHECK: %[[F:.*]] = function_ref @f100_any : $@convention(thin) (@in Any) -> () -// CHECK: copy_addr %0 to [initialization] %[[T1]] : $*T // CHECK: %[[T2:.*]] = init_existential_addr %[[A]] : $*Any, $T -// CHECK: copy_addr [take] %[[T1]] to [initialization] %[[T2]] : $*T +// CHECK: copy_addr %0 to [initialization] %[[T2]] : $*T // CHECK: %{{.*}} = apply %[[F]](%[[A]]) : $@convention(thin) (@in Any) -> () // CHECK: destroy_addr %0 : $*T -// CHECK: %[[R:.*]] = tuple () // CHECK: dealloc_stack %[[A]] : $*Any -// CHECK: dealloc_stack %[[T1]] : $*T -// CHECK: return %[[R]] : $() +// CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f101_passAny' -sil @f101_passAny : $@convention(thin) (@in T) -> () { -bb0(%0 : $T): +sil [ossa] @f101_passAny : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): %2 = function_ref @f100_any : $@convention(thin) (@in Any) -> () %3 = copy_value %0 : $T %4 = init_existential_value %3 : $T, $T, $Any @@ -482,27 +552,527 @@ bb0(%0 : $T): // Test convertIndirectFunctionArgs and init_existential_value on concrete // types. 
-// CHECK-LABEL: sil @f102_passAnyObjectAsAny : $@convention(thin) (@in AnyObject) -> () { +// CHECK-LABEL: sil [ossa] @f102_passAnyObjectAsAny : $@convention(thin) (@in AnyObject) -> () { // CHECK: bb0(%0 : $*AnyObject): -// CHECK: %[[A:.*]] = alloc_stack $Any -// CHECK: %[[ARG:.*]] = load %0 : $*AnyObject -// CHECK: %[[F:.*]] = function_ref @f100_any : $@convention(thin) (@in Any) -> () -// CHECK: strong_retain %[[ARG]] : $AnyObject -// CHECK: %[[VAL:.*]] = init_existential_addr %[[A]] : $*Any, $AnyObject -// CHECK: store %[[ARG]] to %[[VAL]] : $*AnyObject -// CHECK: %{{.*}} = apply %[[F]](%[[A]]) : $@convention(thin) (@in Any) -> () -// CHECK: strong_release %[[ARG]] : $AnyObject -// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[A]] : $*Any -// CHECK: return %[[R]] : $() +// CHECK: [[A:%.*]] = alloc_stack $Any +// CHECK: [[ARG:%.*]] = load [take] %0 : $*AnyObject +// CHECK: [[F:%.*]] = function_ref @f100_any : $@convention(thin) (@in Any) -> () +// CHECK: [[VAL:%.*]] = init_existential_addr [[A]] : $*Any, $AnyObject +// CHECK: store [[ARG]] to [init] [[VAL]] : $*AnyObject +// CHECK: %{{.*}} = apply [[F]]([[A]]) : $@convention(thin) (@in Any) -> () +// CHECK: [[R:%.*]] = tuple () +// CHECK: dealloc_stack [[A]] : $*Any +// CHECK: return [[R]] : $() // CHECK-LABEL: } // end sil function 'f102_passAnyObjectAsAny' -sil @f102_passAnyObjectAsAny : $@convention(thin) (@in AnyObject) -> () { -bb0(%0 : $AnyObject): +sil [ossa] @f102_passAnyObjectAsAny : $@convention(thin) (@in AnyObject) -> () { +bb0(%0 : @owned $AnyObject): %2 = function_ref @f100_any : $@convention(thin) (@in Any) -> () - strong_retain %0 : $AnyObject %4 = init_existential_value %0 : $AnyObject, $AnyObject, $Any %5 = apply %2(%4) : $@convention(thin) (@in Any) -> () - strong_release %0 : $AnyObject %7 = tuple () return %7 : $() } + +// Helper +sil [ossa] @f110_singleIndirectFunc : $@convention(thin) <τ_0_0> () -> @out τ_0_0 + +// Test convertApplyWithIndirectResults. 
+// CHECK-LABEL: sil [ossa] @f111_singleIndirectApply : $@convention(thin) <τ_0_0> () -> @out τ_0_0 { +// CHECK: bb0(%0 : $*τ_0_0): +// CHECK: [[F:%.*]] = function_ref @f110_singleIndirectFunc : $@convention(thin) <τ_0_0> () -> @out τ_0_0 +// CHECK: %{{.*}} = apply [[F]]<τ_0_0>(%0) : $@convention(thin) <τ_0_0> () -> @out τ_0_0 +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f111_singleIndirectApply' +sil [ossa] @f111_singleIndirectApply : $@convention(thin) <τ_0_0> () -> @out τ_0_0 { +bb0: + %2 = function_ref @f110_singleIndirectFunc : $@convention(thin) <τ_0_0> () -> @out τ_0_0 + %3 = apply %2<τ_0_0>() : $@convention(thin) <τ_0_0> () -> @out τ_0_0 + return %3 : $τ_0_0 +} + +// CHECK-LABEL: sil [ossa] @f120_testDestructure : $@convention(method) (@in SI) -> (@out Element, @out I) { +// CHECK: bb0(%0 : $*Element, %1 : $*I, %2 : $*SI): +// CHECK: [[ELT_ADR:%.*]] = struct_element_addr %2 : $*SI, #SI.element +// CHECK: [[IDX_ADR:%.*]] = struct_element_addr %2 : $*SI, #SI.index +// CHECK: [[IDX:%.*]] = load [trivial] [[IDX_ADR]] : $*I +// CHECK: copy_addr [take] [[ELT_ADR]] to [initialization] %0 : $*Element // id: %6 +// CHECK: store [[IDX]] to [trivial] %1 : $*I +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f120_testDestructure' +sil [ossa] @f120_testDestructure : $@convention(method) (@in SI) -> (@out Element, @out I) { +bb0(%0 : @owned $SI): + (%element, %index) = destructure_struct %0 : $SI + %tuple = tuple(%element : $Element, %index : $I) + return %tuple : $(Element, I) +} + +// CHECK-LABEL: sil [ossa] @f121_testStructExtract : $@convention(method) (@in SI) -> (@out AnyObject, @out I) { +// CHECK: bb0(%0 : $*AnyObject, %1 : $*I, %2 : $*SI): +// CHECK: [[IN:%.*]] = load [take] %2 : $*SI +// CHECK: [[B:%.*]] = begin_borrow [[IN]] : $SI +// CHECK: [[E0:%.*]] = struct_extract [[B]] : $SI, #SI.element +// CHECK: [[C:%.*]] = copy_value [[E0]] : $AnyObject +// CHECK: [[E1:%.*]] = struct_extract [[B]] : $SI, #SI.index +// 
CHECK: end_borrow [[B]] : $SI +// CHECK: destroy_value [[IN]] : $SI +// CHECK: store [[C]] to [init] %0 : $*AnyObject +// CHECK: store [[E1]] to [trivial] %1 : $*I +// CHECK-LABEL: } // end sil function 'f121_testStructExtract' +sil [ossa] @f121_testStructExtract : $@convention(method) (@in SI) -> (@out AnyObject, @out I) { +bb0(%0 : @owned $SI): + %borrow = begin_borrow %0 : $SI + %object = struct_extract %borrow : $SI, #SI.element + %copy = copy_value %object : $AnyObject + %index = struct_extract %borrow : $SI, #SI.index + end_borrow %borrow : $SI + destroy_value %0 : $SI + %tuple = tuple(%copy : $AnyObject, %index : $I) + return %tuple : $(AnyObject, I) +} + +// CHECK-LABEL: sil [ossa] @f122_testStructExtract : $@convention(method) (@in SRef) -> (@out AnyObject, @out T) { +// CHECK: bb0(%0 : $*AnyObject, %1 : $*T, %2 : $*SRef): +// CHECK-NOT: alloc_stack +// CHECK: [[E0:%.*]] = struct_element_addr %2 : $*SRef, #SRef.object +// CHECK: [[C:%.*]] = load [copy] [[E0]] : $*AnyObject +// CHECK: [[E1:%.*]] = struct_element_addr %2 : $*SRef, #SRef.element +// CHECK: copy_addr [[E1]] to [initialization] %1 : $*T +// CHECK: destroy_addr %2 : $*SRef +// CHECK: store [[C]] to [init] %0 : $*AnyObject +// CHECK-NOT: dealloc_stack +// CHECK-LABEL: } // end sil function 'f122_testStructExtract' +sil [ossa] @f122_testStructExtract : $@convention(method) (@in SRef) -> (@out AnyObject, @out T) { +bb0(%0 : @owned $SRef): + %borrow = begin_borrow %0 : $SRef + %object = struct_extract %borrow : $SRef, #SRef.object + %copy1 = copy_value %object : $AnyObject + %element = struct_extract %borrow : $SRef, #SRef.element + %copy2 = copy_value %element : $T + end_borrow %borrow : $SRef + destroy_value %0 : $SRef + %tuple = tuple(%copy1 : $AnyObject, %copy2 : $T) + return %tuple : $(AnyObject, T) +} + +// CHECK-LABEL: sil [ossa] @f123_testStructExtract : $@convention(method) (@in SRef) -> (@out AnyObject, @out T) { +// CHECK: bb0(%0 : $*AnyObject, %1 : $*T, %2 : $*SRef): +// CHECK-NOT: 
alloc_stack +// CHECK: [[E0:%.*]] = struct_element_addr %2 : $*SRef, #SRef.object +// CHECK: [[L:%.*]] = load_borrow [[E0]] : $*AnyObject +// CHECK: apply %{{.*}}([[L]]) : $@convention(thin) (@guaranteed AnyObject) -> () +// CHECK: [[C:%.*]] = copy_value [[L]] : $AnyObject +// CHECK: end_borrow [[L]] : $AnyObject +// CHECK: [[E1:%.*]] = struct_element_addr %2 : $*SRef, #SRef.element +// CHECK: copy_addr [[E1]] to [initialization] %1 : $*T +// CHECK: destroy_addr %2 : $*SRef +// CHECK: store [[C]] to [init] %0 : $*AnyObject +// CHECK-NOT: dealloc_stack +// CHECK-LABEL: } // end sil function 'f123_testStructExtract' +sil [ossa] @f123_testStructExtract : $@convention(method) (@in SRef) -> (@out AnyObject, @out T) { +bb0(%0 : @owned $SRef): + %borrow = begin_borrow %0 : $SRef + %object = struct_extract %borrow : $SRef, #SRef.object + %f = function_ref @takeGuaranteedObject : $@convention(thin) (@guaranteed AnyObject) -> () + %call = apply%f(%object) : $@convention(thin) (@guaranteed AnyObject) -> () + %copy1 = copy_value %object : $AnyObject + %element = struct_extract %borrow : $SRef, #SRef.element + %copy2 = copy_value %element : $T + end_borrow %borrow : $SRef + destroy_value %0 : $SRef + %tuple = tuple(%copy1 : $AnyObject, %copy2 : $T) + return %tuple : $(AnyObject, T) +} + +// CHECK-LABEL: sil [ossa] @f124_testTupleExtract : $@convention(method) (@in (AnyObject, T)) -> (@out AnyObject, @out T) { +// CHECK: bb0(%0 : $*AnyObject, %1 : $*T, %2 : $*(AnyObject, T)): +// CHECK-NOT: alloc_stack +// CHECK: [[E0:%.*]] = tuple_element_addr %2 : $*(AnyObject, T), 0 +// CHECK: [[C:%.*]] = load [copy] [[E0]] : $*AnyObject +// CHECK: [[E1:%.*]] = tuple_element_addr %2 : $*(AnyObject, T), 1 +// CHECK: copy_addr [[E1]] to [initialization] %1 : $*T +// CHECK: destroy_addr %2 : $*(AnyObject, T) +// CHECK: store [[C]] to [init] %0 : $*AnyObject +// CHECK-NOT: dealloc_stack +sil [ossa] @f124_testTupleExtract : $@convention(method) (@in (AnyObject, T)) -> (@out AnyObject, @out T) { 
+bb0(%0 : @owned $(AnyObject, T)): + %borrow = begin_borrow %0 : $(AnyObject, T) + %object = tuple_extract %borrow : $(AnyObject, T), 0 + %copy1 = copy_value %object : $AnyObject + %element = tuple_extract %borrow : $(AnyObject, T), 1 + %copy2 = copy_value %element : $T + end_borrow %borrow : $(AnyObject, T) + destroy_value %0 : $(AnyObject, T) + %tuple = tuple(%copy1 : $AnyObject, %copy2 : $T) + return %tuple : $(AnyObject, T) +} + +// CHECK-LABEL: sil [ossa] @f125_testTupleExtract : $@convention(method) (@in (AnyObject, T)) -> (@out AnyObject, @out T) { +// CHECK: bb0(%0 : $*AnyObject, %1 : $*T, %2 : $*(AnyObject, T)): +// CHECK-NOT: alloc_stack +// CHECK: [[E0:%.*]] = tuple_element_addr %2 : $*(AnyObject, T), 0 +// CHECK: [[L:%.*]] = load_borrow %3 : $*AnyObject +// CHECK: apply %{{.*}}([[L]]) : $@convention(thin) (@guaranteed AnyObject) -> () +// CHECK: [[C:%.*]] = copy_value [[L]] : $AnyObject +// CHECK: end_borrow [[L]] : $AnyObject +// CHECK: [[E1:%.*]] = tuple_element_addr %2 : $*(AnyObject, T), 1 +// CHECK: copy_addr [[E1]] to [initialization] %1 : $*T +// CHECK: destroy_addr %2 : $*(AnyObject, T) +// CHECK: store [[C]] to [init] %0 : $*AnyObject +// CHECK-NOT: dealloc_stack +// CHECK-LABEL: } // end sil function 'f125_testTupleExtract' +sil [ossa] @f125_testTupleExtract : $@convention(method) (@in (AnyObject, T)) -> (@out AnyObject, @out T) { +bb0(%0 : @owned $(AnyObject, T)): + %borrow = begin_borrow %0 : $(AnyObject, T) + %object = tuple_extract %borrow : $(AnyObject, T), 0 + %f = function_ref @takeGuaranteedObject : $@convention(thin) (@guaranteed AnyObject) -> () + %call = apply%f(%object) : $@convention(thin) (@guaranteed AnyObject) -> () + %copy1 = copy_value %object : $AnyObject + %element = tuple_extract %borrow : $(AnyObject, T), 1 + %copy2 = copy_value %element : $T + end_borrow %borrow : $(AnyObject, T) + destroy_value %0 : $(AnyObject, T) + %tuple = tuple(%copy1 : $AnyObject, %copy2 : $T) + return %tuple : $(AnyObject, T) +} + +// CHECK-LABEL: 
sil [ossa] @f130_testReleaseValue : $@convention(thin) (@in T) -> () { +// CHECK: bb0(%0 : $*T): +// CHECK: destroy_addr %0 : $*T +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f130_testReleaseValue' +sil [ossa] @f130_testReleaseValue : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): + destroy_value %0 : $T + %r = tuple () + return %r : $() +} + +// CHECK-LABEL: sil [ossa] @f140_testTupleProject : $@convention(thin) (@in T) -> () { +// CHECK: bb0(%0 : $*T): +// CHECK: [[LOCAL:%.*]] = alloc_stack $((T, T), T) +// CHECK: [[ELT0:%.*]] = tuple_element_addr [[LOCAL]] : $*((T, T), T), 0 +// CHECK: [[ELT0_0:%.*]] = tuple_element_addr [[ELT0]] : $*(T, T), 0 +// CHECK: copy_addr %0 to [initialization] [[ELT0_0]] : $*T +// CHECK: [[ELT1:%.*]] = tuple_element_addr [[LOCAL]] : $*((T, T), T), 1 +// CHECK: copy_addr %0 to [initialization] [[ELT1]] : $*T +// CHECK: [[ELT0_1:%.*]] = tuple_element_addr [[ELT0]] : $*(T, T), 1 +// CHECK: copy_addr [take] %0 to [initialization] [[ELT0_1]] : $*T +// CHECK: destroy_addr [[LOCAL]] : $*((T, T), T) +// CHECK: dealloc_stack [[LOCAL]] : $*((T, T), T) +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f140_testTupleProject' +sil [ossa] @f140_testTupleProject : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): + %copy0 = copy_value %0 : $T + %copy1 = copy_value %0 : $T + %tuple1 = tuple (%copy0 : $T, %0 : $T) + %tuple2 = tuple (%tuple1 : $(T, T), %copy1 : $T) + destroy_value %tuple2 : $((T, T), T) + %r = tuple () + return %r : $() +} + +// CHECK-LABEL: sil [ossa] @f150_testStructProject : $@convention(thin) (@in T) -> () { +// CHECK: bb0(%0 : $*T): +// CHECK: [[ALLOC:%.*]] = alloc_stack $Pair> +// CHECK: [[ELT_X:%.*]] = struct_element_addr [[ALLOC]] : $*Pair>, #Pair.x +// CHECK: [[ELT_XY:%.*]] = struct_element_addr [[ELT_X]] : $*Pair, #Pair.y +// CHECK: copy_addr %0 to [initialization] [[ELT_XY]] : $*T +// CHECK: [[ELT_XX:%.*]] = struct_element_addr [[ELT_X]] : $*Pair, #Pair.x +// 
CHECK: copy_addr [take] %0 to [initialization] [[ELT_XX]] : $*T +// CHECK: [[ELT_Y:%.*]] = struct_element_addr [[ALLOC]] : $*Pair>, #Pair.y +// CHECK: copy_addr [[ELT_X]] to [initialization] [[ELT_Y]] : $*Pair +// CHECK: destroy_addr [[ALLOC]] : $*Pair> +// CHECK: dealloc_stack [[ALLOC]] : $*Pair> +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f150_testStructProject' +sil [ossa] @f150_testStructProject : $@convention(thin) (@in T) -> () { +bb0(%0 : @owned $T): + %copy0 = copy_value %0 : $T + %struct1 = struct $Pair (%0 : $T, %copy0 : $T) + %struct_copy = copy_value %struct1 : $Pair + %struct2 = struct $Pair> (%struct1 : $Pair, %struct_copy : $Pair) + destroy_value %struct2 : $Pair> + %r = tuple () + return %r : $() +} + +// CHECK-LABEL: sil [ossa] @f160_testOpenedArchetype : $@convention(thin) (@in P) -> () { +// CHECK: bb0(%0 : $*P): +// CHECK: [[ALLOC:%.*]] = alloc_stack $P, var, name "q" +// CHECK: copy_addr %0 to [initialization] [[ALLOC]] : $*P +// CHECK: [[OPEN:%.*]] = open_existential_addr immutable_access %0 : $*P to $*[[ARCHETYPE:@opened(.*)]] P +// CHECK: [[CP:%.*]] = alloc_stack $[[ARCHETYPE]] P // type-defs: [[OPEN]]; +// CHECK: [[WT:%.*]] = witness_method $[[ARCHETYPE]] P, #P.foo : (Self) -> () -> (), [[OPEN]] : $*[[ARCHETYPE]] P : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () +// CHECK: copy_addr [[OPEN]] to [initialization] [[CP]] : $*[[ARCHETYPE]] P +// CHECK: %{{.*}} = apply [[WT]]<[[ARCHETYPE]] P>([[CP]]) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () +// CHECK: destroy_addr [[CP]] : $*[[ARCHETYPE]] P +// CHECK: destroy_addr [[ALLOC]] : $*P +// CHECK: destroy_addr %0 : $*P +// CHECK: %{{.*}} = tuple () +// CHECK: dealloc_stack [[CP]] : $*[[ARCHETYPE]] P +// CHECK: dealloc_stack [[ALLOC]] : $*P +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f160_testOpenedArchetype' +sil [ossa] @f160_testOpenedArchetype : 
$@convention(thin) (@in P) -> () { +bb0(%0 : @owned $P): + %2 = alloc_stack $P, var, name "q" + %3 = copy_value %0 : $P + store %3 to [init] %2 : $*P + %b = begin_borrow %0 : $P + %8 = open_existential_value %b : $P to $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P + %9 = witness_method $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P, #P.foo, %8 : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () + // Test that we can handle owned value of type opened archetype. + %10 = copy_value %8 : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P + end_borrow %b : $P + %11 = apply %9<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>(%10) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () + destroy_value %10 : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P + destroy_addr %2 : $*P + dealloc_stack %2 : $*P + destroy_value %0 : $P + %16 = tuple () + return %16 : $() +} + +// CHECK-LABEL: sil [ossa] @f161_testOpenedArchetype : $@convention(thin) (@in P) -> () { +// CHECK: bb0(%0 : $*P): +// CHECK: [[ALLOCP:%.*]] = alloc_stack $P, var, name "q" +// CHECK: copy_addr %0 to [initialization] [[ALLOCP]] : $*P +// CHECK: [[OPEN:%.*]] = open_existential_addr immutable_access %0 : $*P to $*@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P +// CHECK: [[OPTIONAL:%.*]] = alloc_stack $Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P> +// CHECK: witness_method $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P, #P.foo : (Self) -> () -> (), [[OPEN]] : $*@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () +// CHECK: [[INIT:%.*]] = init_enum_data_addr [[OPTIONAL]] : $*Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>, #Optional.some!enumelt +// CHECK: copy_addr [[OPEN]] to [initialization] [[INIT]] : $*@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P +// 
CHECK: inject_enum_addr [[OPTIONAL]] : $*Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>, #Optional.some!enumelt +// CHECK: [[DATA:%.*]] = unchecked_take_enum_data_addr [[OPTIONAL]] : $*Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>, #Optional.some!enumelt +// CHECK: %10 = apply %{{.*}}<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>([[DATA]]) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () +// CHECK: destroy_addr %9 : $*@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P +// CHECK: destroy_addr [[ALLOCP]] : $*P +// CHECK: destroy_addr %0 : $*P +// CHECK: dealloc_stack [[OPTIONAL]] : $*Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P> +// CHECK: dealloc_stack [[ALLOCP]] : $*P +// CHECK-LABEL: } // end sil function 'f161_testOpenedArchetype' +sil [ossa] @f161_testOpenedArchetype : $@convention(thin) (@in P) -> () { +bb0(%0 : @owned $P): + %2 = alloc_stack $P, var, name "q" + %3 = copy_value %0 : $P + store %3 to [init] %2 : $*P + %b = begin_borrow %0 : $P + %8 = open_existential_value %b : $P to $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P + %9 = witness_method $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P, #P.foo, %8 : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () + %cpy = copy_value %8 : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P + end_borrow %b : $P + // This optional is an aggregate that contains an opened exsitential. May sure it's allocated after open_existential_addr. 
+ %opt = enum $Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>, #Optional.some!enumelt, %cpy : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P + %some = unchecked_enum_data %opt : $Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>, #Optional.some!enumelt + %11 = apply %9<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>(%some) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () + destroy_value %some : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P + destroy_addr %2 : $*P + dealloc_stack %2 : $*P + destroy_value %0 : $P + %16 = tuple () + return %16 : $() +} + +// CHECK-LABEL: sil [ossa] @f170_compare : $@convention(thin) (@in_guaranteed T, @in_guaranteed T) -> @out T { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T): +// CHECK: [[WT:%.*]] = witness_method $T, #Comparable."<" : (Self.Type) -> (Self, Self) -> Builtin.Int1 : $@convention(witness_method: Comparable) <τ_0_0 where τ_0_0 : Comparable> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> Builtin.Int1 +// CHECK: [[MT:%.*]] = metatype $@thick T.Type +// CHECK: [[COND:%.*]] = apply [[WT]](%1, %2, [[MT]]) : $@convention(witness_method: Comparable) <τ_0_0 where τ_0_0 : Comparable> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> Builtin.Int1 +// CHECK: cond_br [[COND]], bb2, bb1 +// CHECK: bb1: +// CHECK: copy_addr %1 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: copy_addr %2 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK-LABEL: } // end sil function 'f170_compare' +sil [ossa] @f170_compare : $@convention(thin) (@in_guaranteed T, @in_guaranteed T) -> @out T { +bb0(%0 : @guaranteed $T, %1 : @guaranteed $T): + %2 = witness_method $T, #Comparable."<" : (Self.Type) -> (Self, Self) -> Bool : $@convention(witness_method: Comparable) <τ_0_0 where τ_0_0 : Comparable> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> Bool + %3 = metatype $@thick T.Type + %4 = apply %2(%0, 
%1, %3) : $@convention(witness_method: Comparable) <τ_0_0 where τ_0_0 : Comparable> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_0, @thick τ_0_0.Type) -> Bool + cond_br %4, bb1, bb2 + +bb1: + %6 = copy_value %1 : $T + br bb3(%6 : $T) + +bb2: + %8 = copy_value %0 : $T + br bb3(%8 : $T) + +bb3(%15 : @owned $T): + return %15 : $T +} + +// Test switching on a single opaque value. +// CHECK-LABEL: sil [ossa] @f210_testSwitchEnum : $@convention(method) (@in Optional, @inout T) -> () { +// CHECK: bb0(%0 : $*Optional, %1 : $*T): +// CHECK: switch_enum_addr %0 : $*Optional, case #Optional.some!enumelt: [[SOMEBB:bb[0-9]+]], case #Optional.none!enumelt: [[NONEBB:bb[0-9]+]] +// CHECK: [[NONEBB]]: +// CHECK: br [[RETBB:bb[0-9]+]] +// CHECK: [[SOMEBB]]: +// CHECK: [[CAST:%.*]] = unchecked_take_enum_data_addr %0 : $*Optional, #Optional.some!enumelt +// CHECK: copy_addr [take] [[CAST]] to [initialization] %1 : $*T +// CHECK: br [[RETBB]] +// CHECK: [[RETBB]]: +// CHECK-LABEL: } // end sil function 'f210_testSwitchEnum' +sil [ossa] @f210_testSwitchEnum : $@convention(method) (@in Optional, @inout T) -> () { +bb0(%0 : @owned $Optional, %1 : $*T): + switch_enum %0 : $Optional, case #Optional.some: bb2, case #Optional.none: bb1 + +bb1: + br bb3 + +bb2(%some : @owned $T): + destroy_addr %1 : $*T + store %some to [init] %1 : $*T + br bb3 + +bb3: + %31 = tuple () + return %31 : $() +} + +// f220_testSwitchMixed +// CHECK-LABEL: sil [ossa] @f220_testSwitchMixed : $@convention(method) (@in Mixed, @inout Builtin.Int64, @inout T) -> () { +// CHECK: bb0(%0 : $*Mixed, %1 : $*Builtin.Int64, %2 : $*T): +// CHECK: switch_enum_addr %0 : $*Mixed, case #Mixed.i!enumelt: [[IBB:bb[0-9]+]], case #Mixed.t!enumelt: [[TBB:bb[0-9]+]], default [[DBB:bb[0-9]+]] +// CHECK: [[DBB]]: +// CHECK: [[OBJADDR:%.*]] = unchecked_take_enum_data_addr %0 : $*Mixed, #Mixed.o!enumelt +// CHECK: [[LD:%.*]] = load [take] [[OBJADDR]] : $*AnyObject +// CHECK: destroy_value [[LD]] : $AnyObject +// CHECK: br [[RBB:bb[0-9]+]] +// 
CHECK: [[TBB]]: +// CHECK: [[CAST:%.*]] = unchecked_take_enum_data_addr %0 : $*Mixed, #Mixed.t!enumelt +// CHECK: destroy_addr %2 : $*T +// CHECK: copy_addr [take] [[CAST]] to [initialization] %2 : $*T +// CHECK: br [[RBB]] +// CHECK: [[IBB]]: +// CHECK: [[CAST:%.*]] = unchecked_take_enum_data_addr %0 : $*Mixed, #Mixed.i!enumelt +// CHECK: [[VAL:%.*]] = load [trivial] [[CAST]] : $*Builtin.Int64 +// CHECK: store [[VAL]] to [trivial] %1 : $*Builtin.Int64 +// CHECK: br [[RBB]] +// CHECK: [[RBB]]: +// CHECK-LABEL: } // end sil function 'f220_testSwitchMixed' +sil [ossa] @f220_testSwitchMixed : $@convention(method) (@in Mixed, @inout Int, @inout T) -> () { +bb0(%0 : @owned $Mixed, %1 : $*Int, %2 : $*T): + switch_enum %0 : $Mixed, case #Mixed.i: bb1, case #Mixed.t: bb2, default bb3 + +bb1(%13 : $Int): + store %13 to [trivial] %1 : $*Int + br bb4 + +bb2(%14 : @owned $T): + destroy_addr %2 : $*T + store %14 to [init] %2 : $*T + br bb4 + +bb3(%18: @owned $AnyObject): + destroy_value %18 : $AnyObject + br bb4 + +bb4: + %31 = tuple () + return %31 : $() +} + +// CHECK-LABEL: sil [ossa] @f230_testTryApply : $@convention(thin) (@in T) -> (@out T, @error Error) { +// CHECK: bb0(%0 : $*T, %1 : $*T): +// CHECK: [[F:%.*]] = function_ref @throwsError : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @error Error) +// CHECK: try_apply [[F]](%0, %1) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @error Error), normal bb2, error bb1 +// CHECK: bb1([[E:%.*]] : $Error): +// CHECK: throw [[E]] : $Error +// CHECK: bb2([[NONE:%.*]] : $()): +// CHECK: %{{.*}} = tuple () +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f230_testTryApply' +sil [ossa] @f230_testTryApply : $@convention(thin) (@in T) -> (@out T, @error Error) { +bb0(%0 : @owned $T): + %3 = function_ref @throwsError : $@convention(thin) (@in T) -> (@out T, @error Error) + try_apply %3(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @error Error), normal bb1, error bb2 + +bb1(%5 
: @owned $T): + return %5 : $T + +bb2(%7 : $Error): + throw %7 : $Error +} + +// CHECK-LABEL: sil [ossa] @f240_testTryApplyDirect : $@convention(thin) (@in T) -> (Builtin.Int64, @error Error) { +// CHECK: bb0(%0 : $*T): +// CHECK: [[F:%.*]] = function_ref @returnInt : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (Builtin.Int64, @error Error) +// CHECK: try_apply [[F]](%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (Builtin.Int64, @error Error), normal bb2, error bb1 +// CHECK: bb1([[E:%.*]] : $Error): +// CHECK: throw [[E]] : $Error +// CHECK: bb2([[V:%.*]] : $Builtin.Int64): +// CHECK: return [[V]] : $Builtin.Int64 +// CHECK-LABEL: } // end sil function 'f240_testTryApplyDirect' +sil [ossa] @f240_testTryApplyDirect : $@convention(thin) (@in T) -> (Int, @error Error) { +bb0(%0 : @owned $T): + %3 = function_ref @returnInt : $@convention(thin) (@in T) -> (Int, @error Error) + try_apply %3(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (Int, @error Error), normal bb1, error bb2 + +bb1(%5 : $Int): + return %5 : $Int + +bb2(%7 : $Error): + throw %7 : $Error +} + +// CHECK-LABEL: sil [ossa] @f250_testTryApplyIndirect : $@convention(thin) (@in T) -> (Builtin.Int64, @error Error) { +// CHECK: bb0(%0 : $*T): +// CHECK: [[F:%.*]] = function_ref @returnIntOut : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out Builtin.Int64, @error Error) +// CHECK: [[OUT_I:%.*]] = alloc_stack $Builtin.Int64 +// CHECK: try_apply %1([[OUT_I]], %0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out Builtin.Int64, @error Error), normal bb2, error bb1 +// CHECK: bb1([[E:%.*]] : $Error): +// CHECK: dealloc_stack [[OUT_I]] : $*Builtin.Int64 +// CHECK: throw [[E]] : $Error +// CHECK: bb2(%{{.*}} : $()): +// CHECK: [[V:%.*]] = load [trivial] [[OUT_I]] : $*Builtin.Int64 +// CHECK: dealloc_stack [[OUT_I]] : $*Builtin.Int64 +// CHECK: return [[V]] : $Builtin.Int64 +// CHECK-LABEL: } // end sil function 'f250_testTryApplyIndirect' +sil [ossa] @f250_testTryApplyIndirect : $@convention(thin) (@in T) -> (Int, 
@error Error) { +bb0(%0 : @owned $T): + %3 = function_ref @returnIntOut : $@convention(thin) (@in T) -> (@out Int, @error Error) + try_apply %3(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out Int, @error Error), normal bb1, error bb2 + +bb1(%5 : $Int): + return %5 : $Int + +bb2(%7 : $Error): + throw %7 : $Error +} + +// CHECK-LABEL: sil [ossa] @f260_testTryApplyTuple : $@convention(thin) (@in T) -> (@out T, @error Error) { +// CHECK: bb0(%0 : $*T, %1 : $*T): +// CHECK: [[OUT_T:%.*]] = alloc_stack $T +// CHECK: [[F:%.*]] = function_ref @returnTuple : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, Builtin.Int64, @out Builtin.Int64, @out τ_0_0, @error Error) +// CHECK: [[OUT_I:%.*]] = alloc_stack $Builtin.Int64 +// CHECK: try_apply [[F]]([[OUT_T]], [[OUT_I]], %0, %1) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, Builtin.Int64, @out Builtin.Int64, @out τ_0_0, @error Error), normal bb2, error bb1 +// CHECK: bb1([[E:%.*]] : $Error): +// CHECK: dealloc_stack [[OUT_I]] : $*Builtin.Int64 +// CHECK: dealloc_stack [[OUT_T]] : $*T +// CHECK: throw [[E]] : $Error +// CHECK: bb2([[RESULT:%.*]] : $Builtin.Int64): +// CHECK: dealloc_stack [[OUT_I]] : $*Builtin.Int64 +// CHECK: destroy_addr [[OUT_T]] : $*T +// CHECK: %{{.*}} = tuple () +// CHECK: dealloc_stack [[OUT_T]] : $*T +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f260_testTryApplyTuple' +sil [ossa] @f260_testTryApplyTuple : $@convention(thin) (@in T) -> (@out T, @error Error) { +bb0(%0 : @owned $T): + %1 = function_ref @returnTuple : $@convention(thin) (@in T) -> (@out T, Int, @out Int, @out T, @error Error) + try_apply %1(%0) : $@convention(thin) (@in T) -> (@out T, Int, @out Int, @out T, @error Error), normal bb1, error bb2 + +bb1(%3 : @owned $(T, Int, Int, T)): + (%4, %5, %6, %7) = destructure_tuple %3 : $(T, Int, Int, T) + destroy_value %4 : $T + return %7 : $T + +bb2(%9 : $Error): + throw %9 : $Error +} diff --git a/test/SILOptimizer/address_lowering_phi.sil 
b/test/SILOptimizer/address_lowering_phi.sil new file mode 100644 index 0000000000000..a7ec7e882ac8b --- /dev/null +++ b/test/SILOptimizer/address_lowering_phi.sil @@ -0,0 +1,440 @@ +// RUN: %target-sil-opt -address-lowering -enable-sil-opaque-values -emit-sorted-sil -module-name Swift -sil-verify-all %s | %FileCheck %s +// +// Test the PhiStorageOptimizer within the AddressLowering pass. + +import Builtin + +sil_stage raw + +typealias AnyObject = Builtin.AnyObject +typealias Int = Builtin.Int64 +typealias Bool = Builtin.Int1 + +struct SRef { + @_hasStorage var object: AnyObject { get set } + @_hasStorage var element: T { get set } + init(object: AnyObject, element: T) +} + +enum InnerEnum { + case payload(T, AnyObject) +} +enum OuterEnum { + case inner(InnerEnum, AnyObject) +} + +struct InnerStruct { + var t: T + var object: AnyObject +} +struct OuterStruct { + var inner: InnerStruct + var object: AnyObject +} + +sil [ossa] @getOut : $@convention(thin) () -> @out T + +// Test BBArgs allocation. + +// No projection from incoming values. No interference. 
+// CHECK-LABEL: sil [ossa] @f010_testBBArgSelect : $@convention(thin) () -> @out T { +// CHECK: bb0(%0 : $*T): +// CHECK: [[F:%.*]] = function_ref @getOut : $@convention(thin) <τ_0_0> () -> @out τ_0_0 +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: [[GET0:%.*]] = apply [[F]](%0) : $@convention(thin) <τ_0_0> () -> @out τ_0_0 +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: [[GET1:%.*]] = apply [[F]](%0) : $@convention(thin) <τ_0_0> () -> @out τ_0_0 +// CHECK: br bb3 +// CHECK: bb3: +// CHECK: %{{.*}} = tuple () +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f010_testBBArgSelect' +sil [ossa] @f010_testBBArgSelect : $@convention(thin) () -> @out T { +bb0: + %get = function_ref @getOut : $@convention(thin) <τ_0_0>() -> @out τ_0_0 + cond_br undef, bb1, bb2 + +bb1: + %get0 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 + br bb3(%get0 : $T) + +bb2: + %get1 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 + br bb3(%get1 : $T) + +// %15 +bb3(%15 : @owned $T): + return %15 : $T +} + +// One projection from incoming values. One interference. 
+// +// CHECK-LABEL: sil [ossa] @f020_testBBArgProjectOne : $@convention(thin) () -> @out T { +// CHECK: bb0(%0 : $*T): +// CHECK: [[ALLOC:%.*]] = alloc_stack $T +// CHECK: apply %{{.*}}(%0) : $@convention(thin) <τ_0_0> () -> @out τ_0_0 +// CHECK: apply %{{.*}}([[ALLOC]]) : $@convention(thin) <τ_0_0> () -> @out τ_0_0 +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: destroy_addr %0 : $*T +// CHECK: copy_addr [take] %1 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: destroy_addr %1 : $*T +// CHECK: br bb3 +// CHECK: bb3: +// CHECK: dealloc_stack [[ALLOC]] : $*T +// CHECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 'f020_testBBArgProjectOne' +sil [ossa] @f020_testBBArgProjectOne : $@convention(thin) () -> @out T { +bb0: + %get = function_ref @getOut : $@convention(thin) <τ_0_0>() -> @out τ_0_0 + %get0 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 + %get1 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 + cond_br undef, bb2, bb1 + +bb1: + destroy_value %get0 : $T + br bb3(%get1 : $T) + +bb2: + destroy_value %get1 : $T + br bb3(%get0 : $T) + +bb3(%arg : @owned $T): + return %arg : $T +} + +// Projection from incoming values. No interference. 
+// CHECK-LABEL: sil [ossa] @f030_testBBArgProjectIn : $@convention(thin) (@in T, @in T) -> @out T { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T): +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: destroy_addr %2 : $*T +// CHECK: copy_addr [take] %1 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: destroy_addr %1 : $*T +// CHECK: copy_addr [take] %2 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK: bb3: +// CHECK-LABEL: } // end sil function 'f030_testBBArgProjectIn' +sil [ossa] @f030_testBBArgProjectIn : $@convention(thin) (@in T, @in T) -> @out T { +bb0(%0 : @owned $T, %1 : @owned $T): + cond_br undef, bb1, bb2 + +bb1: + destroy_value %0 : $T + br bb3(%1 : $T) + +bb2: + destroy_value %1 : $T + br bb3(%0 : $T) + +bb3(%arg : @owned $T): + return %arg : $T +} + +// CHECK-LABEL: sil [ossa] @f040_testInSwapOut : $@convention(thin) (@in T, @in T) -> (@out T, @out T) { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : $*T): +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: copy_addr [take] %2 to [initialization] %1 : $*T +// CHECK: copy_addr [take] %3 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: copy_addr [take] %2 to [initialization] %0 : $*T +// CHECK: copy_addr [take] %3 to [initialization] %1 : $*T +// CHECK: br bb3 +// CHECK-LABEL: } // end sil function 'f040_testInSwapOut' +sil [ossa] @f040_testInSwapOut : $@convention(thin) (@in T, @in T) -> (@out T, @out T) { +bb0(%0 : @owned $T, %1 : @owned $T): + cond_br undef, bb1, bb2 + +bb1: + br bb3(%0 : $T, %1 : $T) + +bb2: + br bb3(%1 : $T, %0 : $T) + +bb3(%arg0 : @owned $T, %arg1 : @owned $T): + %result = tuple (%arg0 : $T, %arg1 : $T) + return %result : $(T, T) +} + +// CHECK-LABEL: sil [ossa] @f050_testCombine : $@convention(thin) (@in T, @in T) -> (@out T, @out T) { +// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : $*T): +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: copy_addr [take] %2 to [initialization] %0 : $*T 
+// CHECK: copy_addr [take] %3 to [initialization] %1 : $*T +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: copy_addr %2 to [initialization] %1 : $*T +// CHECK: destroy_addr %3 : $*T +// CHECK: copy_addr [take] %2 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK: bb3: +// CHECK-LABEL: } // end sil function 'f050_testCombine' +sil [ossa] @f050_testCombine : $@convention(thin) (@in T, @in T) -> (@out T, @out T) { +bb0(%0 : @owned $T, %1 : @owned $T): + cond_br undef, bb2, bb1 + +bb1: + br bb3(%0 : $T, %1 : $T) + +bb2: + %copy = copy_value %0 : $T + destroy_value %1 : $T + br bb3(%0 : $T, %copy : $T) + +bb3(%arg0 : @owned $T, %arg1 : @owned $T): + %result = tuple (%arg0 : $T, %arg1 : $T) + return %result : $(T, T) +} + +// Test cyclic anti-dependence on phi copies. +// +// CHECK-LABEL: sil [ossa] @f060_testInoutSwap : $@convention(thin) (@inout T, @inout T) -> () { +// CHECK: bb0(%0 : $*T, %1 : $*T): +// CHECK: [[ALLOC0:%.*]] = alloc_stack $T +// CHECK: [[ALLOC1:%.*]] = alloc_stack $T +// CHECK: copy_addr [take] %0 to [initialization] [[ALLOC1]] : $*T +// CHECK: copy_addr [take] %1 to [initialization] [[ALLOC0]] : $*T +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: [[TMP:%.*]] = alloc_stack $T +// CHECK: copy_addr [take] [[ALLOC0]] to [initialization] [[TMP]] : $*T +// CHECK: copy_addr [take] [[ALLOC1]] to [initialization] [[ALLOC0]] : $*T +// CHECK: copy_addr [take] [[TMP]] to [initialization] [[ALLOC1]] : $*T +// CHECK: dealloc_stack [[TMP]] : $*T +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: br bb3 +// CHECK: bb3: +// CHECK: copy_addr [take] [[ALLOC0]] to [initialization] %0 : $*T +// CHECK: copy_addr [take] [[ALLOC1]] to [initialization] %1 : $*T +// CHECK: dealloc_stack [[ALLOC1]] : $*T +// CHECK: dealloc_stack [[ALLOC0]] : $*T +// CHECK-LABEL: } // end sil function 'f060_testInoutSwap' +sil [ossa] @f060_testInoutSwap : $@convention(thin) (@inout T, @inout T) -> () { +bb0(%0 : $*T, %1 : $*T): + %2 = load [take] %0 : $*T + %3 = load [take] %1 : 
$*T + cond_br undef, bb2, bb1 + +bb1: + br bb3(%2 : $T, %3 : $T) + +bb2: + br bb3(%3 : $T, %2 : $T) + +bb3(%phi0 : @owned $T, %phi1 : @owned $T): + store %phi0 to [init] %0 : $*T + store %phi1 to [init] %1 : $*T + %99 = tuple () + return %99 : $() +} + +// Test phi copies that project into their use. +// +// CHECK-LABEL: sil [ossa] @f070_testInoutFieldSwap : $@convention(thin) (@inout SRef, @inout SRef) -> () { +// CHECK: bb0(%0 : $*SRef, %1 : $*SRef): +// CHECK: [[ALLOCA:%.*]] = alloc_stack $SRef +// CHECK: [[ALLOCB:%.*]] = alloc_stack $SRef +// CHECK: [[ALLOCSA:%.*]] = alloc_stack $SRef +// CHECK: [[ALLOCSB:%.*]] = alloc_stack $SRef +// CHECK: copy_addr [take] %0 to [initialization] [[ALLOCA]] : $*SRef +// CHECK: copy_addr [take] %1 to [initialization] [[ALLOCB]] : $*SRef +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: [[A1OADR:%.*]] = struct_element_addr [[ALLOCA]] : $*SRef, #SRef.object +// CHECK: [[A1O:%.*]] = load [take] [[A1OADR]] : $*AnyObject +// CHECK: [[A1EADR:%.*]] = struct_element_addr [[ALLOCA]] : $*SRef, #SRef.element +// CHECK: [[B1OADR:%.*]] = struct_element_addr [[ALLOCB]] : $*SRef, #SRef.object +// CHECK: [[B1O:%.*]] = load [take] [[B1OADR]] : $*AnyObject +// CHECK: [[B1EADR:%.*]] = struct_element_addr [[ALLOCB]] : $*SRef, #SRef.element +// CHECK: destroy_value [[B1O]] : $AnyObject +// CHECK: [[CP1:%.*]] = copy_value [[A1O]] : $AnyObject +// CHECK: [[SA1EADR:%.*]] = struct_element_addr [[ALLOCSA]] : $*SRef, #SRef.element +// CHECK: copy_addr [take] [[A1EADR]] to [initialization] [[SA1EADR]] : $*T +// CHECK: [[SB1EADR:%.*]] = struct_element_addr [[ALLOCSB]] : $*SRef, #SRef.element +// CHECK: copy_addr [take] [[B1EADR]] to [initialization] [[SB1EADR]] : $*T +// CHECK: br bb3([[A1O]] : $AnyObject, [[CP1]] : $AnyObject) +// CHECK: bb2: +// CHECK: [[A2OADR:%.*]] = struct_element_addr [[ALLOCA]] : $*SRef, #SRef.object +// CHECK: [[A2O:%.*]] = load [take] [[A2OADR]] : $*AnyObject +// CHECK: [[A2EADR:%.*]] = struct_element_addr [[ALLOCA]] : 
$*SRef, #SRef.element +// CHECK: [[B2OADR:%.*]] = struct_element_addr [[ALLOCB]] : $*SRef, #SRef.object +// CHECK: [[B2O:%.*]] = load [take] [[B2OADR]] : $*AnyObject +// CHECK: [[B2EADR:%.*]] = struct_element_addr [[ALLOCB]] : $*SRef, #SRef.element +// CHECK: destroy_value [[B2O]] : $AnyObject +// CHECK: [[CP2:%.*]] = copy_value [[A2O]] : $AnyObject +// CHECK: [[SB2EADR:%.*]] = struct_element_addr [[ALLOCSB]] : $*SRef, #SRef.element +// CHECK: copy_addr [take] [[A2EADR]] to [initialization] [[SB2EADR]] : $*T +// CHECK: [[SA2EADR:%.*]] = struct_element_addr [[ALLOCSA]] : $*SRef, #SRef.element +// CHECK: copy_addr [take] [[B2EADR]] to [initialization] [[SA2EADR]] : $*T +// CHECK: br bb3([[A2O]] : $AnyObject, [[CP2]] : $AnyObject) +// CHECK: bb3([[PHI0:%.*]] : @owned $AnyObject, [[PHI1:%.*]] : @owned $AnyObject): +// CHECK: [[SA3EADR:%.*]] = struct_element_addr [[ALLOCSA]] : $*SRef, #SRef.object +// CHECK: store [[PHI0]] to [init] [[SA3EADR]] : $*AnyObject +// CHECK: [[SA3EADR:%.*]] = struct_element_addr [[ALLOCSB]] : $*SRef, #SRef.object +// CHECK: store [[PHI1]] to [init] [[SA3EADR]] : $*AnyObject +// CHECK: copy_addr [take] [[ALLOCSA]] to [initialization] %0 : $*SRef +// CHECK: copy_addr [take] [[ALLOCSB]] to [initialization] %1 : $*SRef +// CHECK-LABEL: } // end sil function 'f070_testInoutFieldSwap' +sil [ossa] @f070_testInoutFieldSwap : $@convention(thin) (@inout SRef, @inout SRef) -> () { +bb0(%0 : $*SRef, %1 : $*SRef): + %la = load [take] %0 : $*SRef + %lb = load [take] %1 : $*SRef + cond_br undef, bb2, bb1 + +bb1: + (%da1o, %da1e) = destructure_struct %la : $SRef + (%db1o, %db1e) = destructure_struct %lb : $SRef + destroy_value %db1o : $AnyObject + %ca1o = copy_value %da1o : $AnyObject + br bb3(%da1o : $AnyObject, %ca1o : $AnyObject, %da1e : $T, %db1e : $T) + +bb2: + (%da2o, %da2e) = destructure_struct %la : $SRef + (%db2o, %db2e) = destructure_struct %lb : $SRef + destroy_value %db2o : $AnyObject + %ca2o = copy_value %da2o : $AnyObject + br bb3(%da2o : 
$AnyObject, %ca2o : $AnyObject, %db2e : $T, %da2e : $T) + +bb3(%phio0 : @owned $AnyObject, %phio1 : @owned $AnyObject, %phie0 : @owned $T, %phie1 : @owned $T): + %sa = struct $SRef (%phio0 : $AnyObject, %phie0 : $T) + %sb = struct $SRef (%phio1 : $AnyObject, %phie1 : $T) + store %sa to [init] %0 : $*SRef + store %sb to [init] %1 : $*SRef + %99 = tuple () + return %99 : $() +} + +// CHECK-LABEL: sil [ossa] @f080_testNestedComposeEnumPhi : $@convention(thin) (@in T, @in T, @owned AnyObject, @owned AnyObject) -> @out OuterEnum { +// CHECK: bb0(%0 : $*OuterEnum, %1 : $*T, %2 : $*T, %3 : @owned $AnyObject, %4 : @owned $AnyObject): +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: destroy_addr %2 : $*T +// CHECK: [[TUPLE1:%.*]] = init_enum_data_addr [[INNER1:%.*]] : $*InnerEnum, #InnerEnum.payload!enumelt +// CHECK: [[TUPLE1_0:%.*]] = tuple_element_addr [[TUPLE1]] : $*(T, AnyObject), 0 +// CHECK: copy_addr [take] %1 to [initialization] [[TUPLE1_0]] : $*T +// CHECK: [[TUPLE1_1:%.*]] = tuple_element_addr [[TUPLE1]] : $*(T, AnyObject), 1 +// CHECK: store %3 to [init] [[TUPLE1_1]] : $*AnyObject +// CHECK: inject_enum_addr [[INNER1]] : $*InnerEnum, #InnerEnum.payload!enumelt +// CHECK: copy_addr [take] [[INNER1]] to [initialization] [[PHI6:%.*]] : $*InnerEnum +// CHECK: br bb6 +// CHECK: bb2: +// CHECK: cond_br undef, bb4, bb3 +// CHECK: bb3: +// CHECK: destroy_addr %1 : $*T +// CHECK: copy_addr [take] %2 to [initialization] [[PHI5:%.*]] : $*T +// CHECK: br bb5 +// CHECK: bb4: +// CHECK: destroy_addr %2 : $*T +// CHECK: copy_addr [take] %1 to [initialization] [[PHI5]] : $*T +// CHECK: br bb5 +// CHECK: bb5: +// CHECK: [[TUPLE5:%.*]] = init_enum_data_addr [[INNER5:%.*]] : $*InnerEnum, #InnerEnum.payload!enumelt +// CHECK: [[TUPLE5_0:%.*]] = tuple_element_addr [[TUPLE5]] : $*(T, AnyObject), 0 +// CHECK: copy_addr [take] [[PHI5]] to [initialization] [[TUPLE5_0]] : $*T +// CHECK: [[TUPLE5_1:%.*]] = tuple_element_addr [[TUPLE5]] : $*(T, AnyObject), 1 +// CHECK: store 
%3 to [init] [[TUPLE5_1]] : $*AnyObject +// CHECK: inject_enum_addr [[INNER5]] : $*InnerEnum, #InnerEnum.payload!enumelt +// CHECK: copy_addr [take] [[INNER5]] to [initialization] [[PHI6:%.*]] : $*InnerEnum +// CHECK: br bb6 +// CHECK: bb6: +// CHECK: [[TUPLE6:%.*]] = init_enum_data_addr %0 : $*OuterEnum, #OuterEnum.inner!enumelt +// CHECK: [[TUPLE6_0:%.*]] = tuple_element_addr [[TUPLE6]] : $*(InnerEnum, AnyObject), 0 +// CHECK: copy_addr [take] [[PHI6]] to [initialization] [[TUPLE6_0]] : $*InnerEnum +// CHECK: [[TUPLE6_1:%.*]] = tuple_element_addr [[TUPLE6]] : $*(InnerEnum, AnyObject), 1 +// CHECK: store %4 to [init] [[TUPLE6_1]] : $*AnyObject +// CHECK: inject_enum_addr %0 : $*OuterEnum, #OuterEnum.inner!enumelt +// CHECK-LABEL: } // end sil function 'f080_testNestedComposeEnumPhi' +sil [ossa] @f080_testNestedComposeEnumPhi : $@convention(thin) (@in T, @in T, @owned AnyObject, @owned AnyObject) -> @out OuterEnum { +bb0(%0 : @owned $T, %1 : @owned $T, %2 : @owned $AnyObject, %3 : @owned $AnyObject): + cond_br undef, bb2, bb1 +bb1: + destroy_value %1 : $T + %tuple1 = tuple (%0 : $T, %2 : $AnyObject) + %inner1 = enum $InnerEnum, #InnerEnum.payload, %tuple1 : $(T, AnyObject) + br bb6(%inner1 : $InnerEnum) +bb2: + cond_br undef, bb4, bb3 +bb3: + destroy_value %0 : $T + br bb5(%1 : $T) +bb4: + destroy_value %1 : $T + br bb5(%0 : $T) +bb5(%phi5 : @owned $T): + %tuple5 = tuple (%phi5 : $T, %2 : $AnyObject) + %inner5 = enum $InnerEnum, #InnerEnum.payload, %tuple5 : $(T, AnyObject) + br bb6(%inner5 : $InnerEnum) +bb6(%phi6 : @owned $InnerEnum): + %tuple6 = tuple (%phi6 : $InnerEnum, %3 : $AnyObject) + %outer = enum $OuterEnum, #OuterEnum.inner, %tuple6 : $(InnerEnum, AnyObject) + return %outer : $OuterEnum +} + +// CHECK-LABEL: sil [ossa] @f080_testNestedComposeStructWithPhi : $@convention(thin) (@in T, @in T, @owned AnyObject, @owned AnyObject) -> @out OuterStruct { +// CHECK: bb0(%0 : $*OuterStruct, %1 : $*T, %2 : $*T, %3 : @owned $AnyObject, %4 : @owned $AnyObject): +// 
CHECK-NOT: alloc +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: destroy_addr %2 : $*T +// CHECK: [[INNER1:%.*]] = struct_element_addr %0 : $*OuterStruct, #OuterStruct.inner +// CHECK: [[T2:%.*]] = struct_element_addr [[INNER1]] : $*InnerStruct, #InnerStruct.t +// CHECK: copy_addr [take] %1 to [initialization] [[T2]] : $*T +// CHECK: [[O2:%.*]] = struct_element_addr [[INNER1]] : $*InnerStruct, #InnerStruct.object +// CHECK: store %3 to [init] [[O2]] : $*AnyObject +// CHECK: br bb6 +// CHECK: bb2: +// CHECK: cond_br undef, bb4, bb3 +// CHECK: bb3: +// CHECK: destroy_addr %1 : $*T +// CHECK: [[INNER3:%.*]] = struct_element_addr %0 : $*OuterStruct, #OuterStruct.inner +// CHECK: [[T3:%.*]] = struct_element_addr [[INNER3]] : $*InnerStruct, #InnerStruct.t +// CHECK: copy_addr [take] %2 to [initialization] [[T3]] : $*T +// CHECK: br bb5 +// CHECK: bb4: +// CHECK: destroy_addr %2 : $*T +// CHECK: [[INNER4:%.*]] = struct_element_addr %0 : $*OuterStruct, #OuterStruct.inner +// CHECK: [[T4:%.*]] = struct_element_addr [[INNER4]] : $*InnerStruct, #InnerStruct.t +// CHECK: copy_addr [take] %1 to [initialization] [[T4]] : $*T +// CHECK: br bb5 +// CHECK: bb5: +// CHECK: [[INNER5:%.*]] = struct_element_addr %0 : $*OuterStruct, #OuterStruct.inner +// CHECK: [[O5:%.*]] = struct_element_addr [[INNER5]] : $*InnerStruct, #InnerStruct.object +// CHECK: store %3 to [init] [[O5]] : $*AnyObject +// CHECK: br bb6 +// CHECK: bb6: +// CHECK: [[O6:%.*]] = struct_element_addr %0 : $*OuterStruct, #OuterStruct.object +// CHECK: store %4 to [init] [[O6]] : $*AnyObject +// CHECK-NOT: dealloc +// CHECK-LABEL: } // end sil function 'f080_testNestedComposeStructWithPhi' +sil [ossa] @f080_testNestedComposeStructWithPhi : $@convention(thin) (@in T, @in T, @owned AnyObject, @owned AnyObject) -> @out OuterStruct { +bb0(%0 : @owned $T, %1 : @owned $T, %2 : @owned $AnyObject, %3 : @owned $AnyObject): + cond_br undef, bb2, bb1 +bb1: + destroy_value %1 : $T + %inner2 = struct $InnerStruct (%0 : 
$T, %2 : $AnyObject) + br bb6(%inner2 : $InnerStruct) +bb2: + cond_br undef, bb4, bb3 +bb3: + destroy_value %0 : $T + br bb5(%1 : $T) +bb4: + destroy_value %1 : $T + br bb5(%0 : $T) +bb5(%phi5 : @owned $T): + %inner5 = struct $InnerStruct (%phi5 : $T, %2 : $AnyObject) + br bb6(%inner5 : $InnerStruct) +bb6(%phi6 : @owned $InnerStruct): + %outer = struct $OuterStruct (%phi6 : $InnerStruct, %3 : $AnyObject) + return %outer : $OuterStruct +} diff --git a/test/SILOptimizer/address_projection.sil b/test/SILOptimizer/address_projection.sil deleted file mode 100644 index 9cd4240929a52..0000000000000 --- a/test/SILOptimizer/address_projection.sil +++ /dev/null @@ -1,444 +0,0 @@ -// RUN: %target-sil-opt -address-lowering -enable-sil-opaque-values -optimize-opaque-address-lowering -emit-sorted-sil %s | %FileCheck %s - -import Builtin - -sil_stage canonical -// CHECK: sil_stage lowered - -typealias AnyObject = Builtin.AnyObject -typealias Int = Builtin.Int64 - -// CHECK-LABEL: sil hidden @f010_addrlower_identity : $@convention(thin) (@in T) -> @out T { -// CHECK: bb0(%0 : $*T, %1 : $*T): -// CHECK: copy_addr [take] %1 to [initialization] %0 : $*T -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function 'f010_addrlower_identity' -sil hidden @f010_addrlower_identity : $@convention(thin) (@in T) -> @out T { -bb0(%0 : $T): - return %0 : $T -} - -sil hidden [noinline] @f020_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { -bb0(%0 : $T): - %2 = copy_value %0 : $T - %3 = copy_value %0 : $T - %4 = copy_value %0 : $T - destroy_value %0 : $T - %6 = tuple (%2 : $T, %3 : $T, %4 : $T) - return %6 : $(T, T, T) -} - -// Test returning an opaque tuple of tuples as a concrete tuple. -// The multiResult call is specialized, but the SIL result convention does not change. 
-// --- -// CHECK-LABEL: sil @f021_callMultiResult : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, Builtin.Int64, Builtin.Int64) { -// CHECK: bb0(%0 : $Builtin.Int64): -// CHECK: %[[FN:.*]] = function_ref @f020_multiResult : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) -// CHECK: %[[IN:.*]] = alloc_stack $Builtin.Int64 -// CHECK: store %0 to %[[IN]] : $*Builtin.Int64 -// CHECK: %[[OUT1:.*]] = alloc_stack $Builtin.Int64 -// CHECK: %[[OUT2:.*]] = alloc_stack $Builtin.Int64 -// CHECK: %[[OUT3:.*]] = alloc_stack $Builtin.Int64 -// CHECK: %{{.*}} = apply %[[FN]](%[[OUT1]], %[[OUT2]], %[[OUT3]], %[[IN]]) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) -// CHECK: %[[R3:.*]] = load %[[OUT3]] : $*Builtin.Int64 -// CHECK: dealloc_stack %[[OUT3]] : $*Builtin.Int64 -// CHECK: %[[R2:.*]] = load %[[OUT2]] : $*Builtin.Int64 -// CHECK: dealloc_stack %[[OUT2]] : $*Builtin.Int64 -// CHECK: %[[R1:.*]] = load %[[OUT1]] : $*Builtin.Int64 -// CHECK: dealloc_stack %[[OUT1]] : $*Builtin.Int64 -// CHECK: dealloc_stack %[[IN]] : $*Builtin.Int64 -// CHECK: %[[R:.*]] = tuple (%[[R1]] : $Builtin.Int64, %[[R2]] : $Builtin.Int64, %[[R3]] : $Builtin.Int64) -// CHECK: return %[[R]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64) -// CHECK-LABEL: } // end sil function 'f021_callMultiResult' -sil @f021_callMultiResult : $@convention(thin) (Int) -> (Int, Int, Int) { -bb0(%0 : $Int): - %1 = function_ref @f020_multiResult : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) - %2 = apply %1(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) - %3 = tuple_extract %2 : $(Int, Int, Int), 0 - %4 = tuple_extract %2 : $(Int, Int, Int), 1 - %5 = tuple_extract %2 : $(Int, Int, Int), 2 - %6 = tuple (%3 : $Int, %4 : $Int, %5 : $Int) - return %6 : $(Int, Int, Int) -} - -// CHECK-LABEL: sil @f030_returnPair : $@convention(thin) (@in T) -> (@out T, @out T) { -// CHECK: bb0(%0 : 
$*T, %1 : $*T, %2 : $*T): -// CHECK: copy_addr %2 to [initialization] %0 : $*T -// CHECK: copy_addr [take] %2 to [initialization] %1 : $*T -// CHECK: %[[R:.*]] = tuple () -// CHECK: return %[[R]] : $() -// CHECK-LABEL: } // end sil function 'f030_returnPair' -sil @f030_returnPair : $@convention(thin) (@in T) -> (@out T, @out T) { -bb0(%0 : $T): - %2 = copy_value %0 : $T - %3 = tuple (%2 : $T, %0 : $T) - return %3 : $(T, T) -} - -// CHECK-LABEL: sil @f031_unusedIndirect : $@convention(thin) (@in T) -> @out T { -// CHECK: bb0(%0 : $*T, %1 : $*T): -// CHECK: %[[LOC0:.*]] = alloc_stack $T -// CHECK: %[[OUT1:.*]] = alloc_stack $T -// CHECK: %[[LOC1:.*]] = alloc_stack $T -// CHECK: %[[OUT2:.*]] = alloc_stack $T -// CHECK: // function_ref f030_returnPair -// CHECK: %[[F:.*]] = function_ref @f030_returnPair : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) -// CHECK: copy_addr %1 to [initialization] %[[LOC0]] : $*T -// CHECK: %[[R0:.*]] = apply %[[F]](%[[OUT1]], %[[OUT2]], %[[LOC0]]) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) -// CHECK: copy_addr %[[OUT1]] to [initialization] %[[LOC1]] : $*T -// CHECK: copy_addr %[[OUT2]] to [initialization] %0 : $*T -// CHECK: destroy_addr %[[OUT1]] : $*T -// CHECK: destroy_addr %[[OUT2]] : $*T -// CHECK: destroy_addr %[[LOC1]] : $*T -// CHECK: destroy_addr %1 : $*T -// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[OUT2]] : $*T -// CHECK: dealloc_stack %[[LOC1]] : $*T -// CHECK: dealloc_stack %[[OUT1]] : $*T -// CHECK: dealloc_stack %[[LOC0]] : $*T -// CHECK: return %[[R]] : $() -// CHECK-LABEL: } // end sil function 'f031_unusedIndirect' -sil @f031_unusedIndirect : $@convention(thin) (@in T) -> @out T { -bb0(%0 : $T): - %2 = function_ref @f030_returnPair : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) - %3 = copy_value %0 : $T - %4 = apply %2(%3) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0) - %5 = tuple_extract %4 : $(T, T), 0 - %6 = 
copy_value %5 : $T - %7 = tuple_extract %4 : $(T, T), 1 - %8 = copy_value %7 : $T - destroy_value %4 : $(T, T) - destroy_value %6 : $T - destroy_value %0 : $T - return %8 : $T -} - -sil hidden @f040_consumeArg : $@convention(thin) (@in T) -> () { -bb0(%0 : $T): - destroy_value %0 : $T - %3 = tuple () - return %3 : $() -} - -// CHECK-LABEL: sil @f041_opaqueArg : $@convention(thin) (@in T) -> () { -// CHECK: bb0(%0 : $*T): -// CHECK: %[[LOC:.*]] = alloc_stack $T -// CHECK: %[[FN:.*]] = function_ref @f040_consumeArg : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () -// CHECK: copy_addr %0 to [initialization] %[[LOC]] : $*T -// CHECK: %{{.*}} = apply %[[FN]](%[[LOC]]) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () -// CHECK: destroy_addr %0 : $*T -// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[LOC]] : $*T -// CHECK: return %[[R]] : $() -// CHECK-LABEL: } // end sil function 'f041_opaqueArg' -sil @f041_opaqueArg : $@convention(thin) (@in T) -> () { -bb0(%0 : $T): - %2 = function_ref @f040_consumeArg : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () - %3 = copy_value %0 : $T - %4 = apply %2(%3) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> () - destroy_value %0 : $T - %6 = tuple () - return %6 : $() -} - -// CHECK-LABEL: sil @f050_storeinout : $@convention(thin) (@inout T, @inout T, @in T) -> () { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T): -// CHECK: %[[PREV1:.*]] = alloc_stack $T -// CHECK: %[[PREV2:.*]] = alloc_stack $T -// CHECK: debug_value %0 : $*T, var, name "t", argno 1, expr op_deref -// CHECK: debug_value %1 : $*T, var, name "u", argno 2, expr op_deref -// CHECK: debug_value %2 : $*T, {{.*}} expr op_deref -// CHECK: copy_addr [take] %0 to [initialization] %[[PREV1]] : $*T -// CHECK: copy_addr %2 to [initialization] %0 : $*T -// CHECK: destroy_addr %[[PREV1]] : $*T -// CHECK: copy_addr [take] %1 to [initialization] %[[PREV2]] : $*T -// CHECK: copy_addr %2 to [initialization] %1 : $*T -// CHECK: destroy_addr %[[PREV2]] : $*T -// CHECK: destroy_addr %2 : $*T 
-// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[PREV2]] : $*T -// CHECK: dealloc_stack %[[PREV1]] : $*T -// CHECK: return %[[R]] : $() -// CHECK-LABEL: } // end sil function 'f050_storeinout' -sil @f050_storeinout : $@convention(thin) (@inout T, @inout T, @in T) -> () { -bb0(%0 : $*T, %1 : $*T, %2 : $T): - debug_value %0 : $*T, var, name "t", argno 1, expr op_deref - debug_value %1 : $*T, var, name "u", argno 2, expr op_deref - debug_value %2 : $T, let, name "x", argno 3 - %6 = copy_value %2 : $T - %7 = load %0 : $*T - store %6 to %0 : $*T - destroy_value %7 : $T - %10 = copy_value %2 : $T - %11 = load %1 : $*T - store %10 to %1 : $*T - destroy_value %11 : $T - destroy_value %2 : $T - %15 = tuple () - return %15 : $() -} - -sil hidden @f060_mutate : $@convention(thin) (@inout T, @in T) -> () { -bb0(%0 : $*T, %1 : $T): - %4 = copy_value %1 : $T - %5 = load %0 : $*T - store %4 to %0 : $*T - destroy_value %5 : $T - destroy_value %1 : $T - %9 = tuple () - return %9 : $() -} - -// CHECK-LABEL: sil @f061_callinout : $@convention(thin) (@in T) -> () { -// CHECK: bb0(%0 : $*T): -// CHECK: %[[LOC2:.*]] = alloc_stack $T -// CHECK: %[[LOC1:.*]] = alloc_stack $T -// CHECK: copy_addr %0 to [initialization] %[[LOC1]] : $*T -// CHECK: %[[FN:.*]] = function_ref @f060_mutate : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () -// CHECK: copy_addr %0 to [initialization] %[[LOC2]] : $*T -// CHECK: %{{.*}} = apply %[[FN]](%[[LOC1]], %[[LOC2]]) : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () -// CHECK: destroy_addr %[[LOC1]] : $*T -// CHECK: destroy_addr %0 : $*T -// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[LOC1]] : $*T -// CHECK: dealloc_stack %[[LOC2]] : $*T -// CHECK: return %[[R]] : $() -// CHECK-LABEL: } // end sil function 'f061_callinout' -sil @f061_callinout : $@convention(thin) (@in T) -> () { -bb0(%0 : $T): - %1 = alloc_stack $T, var, name "u" - %3 = copy_value %0 : $T - store %3 to %1 : $*T - %5 = function_ref @f060_mutate : 
$@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () - %6 = copy_value %0 : $T - %7 = apply %5(%1, %6) : $@convention(thin) <τ_0_0> (@inout τ_0_0, @in τ_0_0) -> () - destroy_addr %1 : $*T - destroy_value %0 : $T - %10 = tuple () - dealloc_stack %1 : $*T - return %10 : $() -} - -public protocol C : class {} - -// CHECK-LABEL: sil @f070_mixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $C): -// CHECK: copy_addr [take] %1 to [initialization] %0 : $*T -// CHECK: return %2 : $C -// CHECK-LABEL: } // end sil function 'f070_mixedResult1' -sil @f070_mixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -bb0(%0 : $T, %1 : $C): - %4 = tuple (%0 : $T, %1 : $C) - return %4 : $(T, C) -} - -// CHECK-LABEL: sil @f071_mixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : $C): -// CHECK: copy_addr %2 to [initialization] %0 : $*T -// CHECK: strong_retain %3 : $C -// CHECK: copy_addr [take] %2 to [initialization] %1 : $*T -// CHECK: %[[T:.*]] = tuple (%3 : $C, %3 : $C) -// CHECK: return %[[T]] : $(C, C) -// CHECK-LABEL: } // end sil function 'f071_mixedResult2' -sil @f071_mixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -bb0(%0 : $T, %1 : $C): - %4 = copy_value %0 : $T - strong_retain %1 : $C - %6 = tuple (%4 : $T, %0 : $T, %1 : $C, %1 : $C) - return %6 : $(T, T, C, C) -} - -// CHECK-LABEL: sil @f072_callMixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $C): -// CHECK: %[[LIN:.*]] = alloc_stack $T -// CHECK: %[[OUT:.*]] = alloc_stack $T -// CHECK: // function_ref f070_mixedResult1 -// CHECK: %[[F:.*]] = function_ref @f070_mixedResult1 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) -// CHECK: copy_addr %1 to [initialization] %[[LIN]] : $*T -// CHECK: strong_retain 
%2 : $C -// CHECK: %[[R:.*]] = apply %[[F]](%[[OUT]], %[[LIN]], %2) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) -// CHECK: copy_addr %[[OUT]] to [initialization] %0 : $*T -// CHECK: strong_retain %[[R]] : $C -// CHECK: destroy_addr %[[OUT]] : $*T -// CHECK: strong_release %[[R]] : $C -// CHECK: strong_release %2 : $C -// CHECK: destroy_addr %1 : $*T -// CHECK: dealloc_stack %[[OUT]] : $*T -// CHECK: dealloc_stack %[[LIN]] : $*T -// CHECK: return %[[R]] : $C -// CHECK-LABEL: } // end sil function 'f072_callMixedResult1' -sil @f072_callMixedResult1 : $@convention(thin) (@in T, @owned C) -> (@out T, @owned C) { -bb0(%0 : $T, %1 : $C): - %4 = function_ref @f070_mixedResult1 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) - %5 = copy_value %0 : $T - strong_retain %1 : $C - %7 = apply %4(%5, %1) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @owned C) - %8 = tuple_extract %7 : $(T, C), 0 - %9 = copy_value %8 : $T - %10 = tuple_extract %7 : $(T, C), 1 - strong_retain %10 : $C - destroy_value %7 : $(T, C) - strong_release %1 : $C - destroy_value %0 : $T - %15 = tuple (%9 : $T, %10 : $C) - return %15 : $(T, C) -} - -// CHECK-LABEL: sil @f073_callMixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -// CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T, %3 : $C): -// CHECK: %[[LOC0:.*]] = alloc_stack $T -// CHECK: %[[OUT1:.*]] = alloc_stack $T -// CHECK: %[[OUT2:.*]] = alloc_stack $T -// CHECK: %[[F:.*]] = function_ref @f071_mixedResult2 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) -// CHECK: copy_addr %2 to [initialization] %[[LOC0]] : $*T -// CHECK: strong_retain %3 : $C -// CHECK: %[[R:.*]] = apply %[[F]](%[[OUT1]], %[[OUT2]], %[[LOC0]], %3) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) -// CHECK: %[[T2:.*]] = tuple_extract %[[R]] : $(C, C), 1 -// CHECK: 
%[[T1:.*]] = tuple_extract %[[R]] : $(C, C), 0 -// CHECK: copy_addr %[[OUT1]] to [initialization] %0 : $*T -// CHECK: copy_addr %[[OUT2]] to [initialization] %1 : $*T -// CHECK: strong_retain %[[T1]] : $C -// CHECK: strong_retain %[[T2]] : $C -// CHECK: destroy_addr %[[OUT1]] : $*T -// CHECK: destroy_addr %[[OUT2]] : $*T -// CHECK: strong_release %[[T1]] : $C -// CHECK: strong_release %[[T2]] : $C -// CHECK: strong_release %3 : $C -// CHECK: destroy_addr %2 : $*T -// CHECK: %[[T:.*]] = tuple (%[[T1]] : $C, %[[T2]] : $C) -// CHECK: dealloc_stack %[[OUT2]] : $*T -// CHECK: dealloc_stack %[[OUT1]] : $*T -// CHECK: dealloc_stack %[[LOC0]] : $*T -// CHECK: return %[[T]] : $(C, C) -// CHECK-LABEL: } // end sil function 'f073_callMixedResult2' -sil @f073_callMixedResult2 : $@convention(thin) (@in T, @owned C) -> (@out T, @out T, @owned C, @owned C) { -bb0(%0 : $T, %1 : $C): - %4 = function_ref @f071_mixedResult2 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) - %5 = copy_value %0 : $T - strong_retain %1 : $C - %7 = apply %4(%5, %1) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> (@out τ_0_0, @out τ_0_0, @owned C, @owned C) - %8 = tuple_extract %7 : $(T, T, C, C), 0 - %9 = copy_value %8 : $T - %10 = tuple_extract %7 : $(T, T, C, C), 1 - %11 = copy_value %10 : $T - %12 = tuple_extract %7 : $(T, T, C, C), 2 - strong_retain %12 : $C - %14 = tuple_extract %7 : $(T, T, C, C), 3 - strong_retain %14 : $C - destroy_value %7 : $(T, T, C, C) - strong_release %1 : $C - destroy_value %0 : $T - %19 = tuple (%9 : $T, %11 : $T, %12 : $C, %14 : $C) - return %19 : $(T, T, C, C) -} - -sil_default_witness_table C {} - -enum Optional { - case none - case some(T) -} - -// CHECK-LABEL: sil @f080_optional : $@convention(thin) (@in T) -> @out Optional { -// CHECK: bb0(%0 : $*Optional, %1 : $*T): -// CHECK: %[[DATA:.*]] = init_enum_data_addr %0 : $*Optional, #Optional.some!enumelt -// CHECK: copy_addr %1 to [initialization] %[[DATA]] : $*T 
-// CHECK: inject_enum_addr %0 : $*Optional, #Optional.some!enumelt -// CHECK: destroy_addr %1 : $*T -// CHECK: %[[T:.*]] = tuple () -// CHECK: return %[[T]] : $() -// CHECK-LABEL: } // end sil function 'f080_optional' -sil @f080_optional : $@convention(thin) (@in T) -> @out Optional { -bb0(%0 : $T): - %cpy = copy_value %0 : $T - %opt = enum $Optional, #Optional.some!enumelt, %cpy : $T - destroy_value %0 : $T - return %opt : $Optional -} - -// CHECK-LABEL: sil @f090_tupletuple : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) { -// CHECK: bb0(%0 : $*(Builtin.Int64, Builtin.Int64), %1 : $*(Builtin.Int64, Builtin.Int64), %2 : $(Builtin.Int64, Builtin.Int64), %3 : $Builtin.Int64): -// CHECK: store %2 to %0 : $*(Builtin.Int64, Builtin.Int64) -// CHECK: store %2 to %1 : $*(Builtin.Int64, Builtin.Int64) -// CHECK: %[[T:.*]] = tuple (%3 : $Builtin.Int64, %3 : $Builtin.Int64) -// CHECK: return %[[T]] : $(Builtin.Int64, Builtin.Int64) -// CHECK-LABEL: } // end sil function 'f090_tupletuple' -sil @f090_tupletuple : $@convention(thin) ((Int, Int), Int) -> (@out (Int, Int), @out (Int, Int), Int, Int) { -bb0(%0 : $(Int, Int), %1 : $Int): - %2 = tuple (%0 : $(Int, Int), %0 : $(Int, Int), %1 : $Int, %1 : $Int) - return %2 : $((Int, Int), (Int, Int), Int, Int) -} - -// CHECK-LABEL: sil @f091_callTuple : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64) { -// CHECK: bb0(%0 : $Builtin.Int64): -// CHECK: %[[T1:.*]] = tuple (%0 : $Builtin.Int64, %0 : $Builtin.Int64) -// CHECK: %[[F:.*]] = function_ref @f090_tupletuple : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) -// CHECK: %[[O1:.*]] = alloc_stack $(Builtin.Int64, Builtin.Int64) -// CHECK: %[[O2:.*]] = 
alloc_stack $(Builtin.Int64, Builtin.Int64) -// CHECK: %[[RT:.*]] = apply %[[F]](%[[O1]], %4, %1, %0) : $@convention(thin) ((Builtin.Int64, Builtin.Int64), Builtin.Int64) -> (@out (Builtin.Int64, Builtin.Int64), @out (Builtin.Int64, Builtin.Int64), Builtin.Int64, Builtin.Int64) -// CHECK: %[[R1:.*]] = tuple_extract %[[RT]] : $(Builtin.Int64, Builtin.Int64), 1 -// CHECK: %[[R0:.*]] = tuple_extract %[[RT]] : $(Builtin.Int64, Builtin.Int64), 0 -// CHECK: %[[L2:.*]] = load %[[O2]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: dealloc_stack %[[O2]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: %[[L1:.*]] = load %[[O1]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: dealloc_stack %[[O1]] : $*(Builtin.Int64, Builtin.Int64) -// CHECK: %[[E10:.*]] = tuple_extract %[[L1]] : $(Builtin.Int64, Builtin.Int64), 0 -// CHECK: %[[E11:.*]] = tuple_extract %[[L1]] : $(Builtin.Int64, Builtin.Int64), 1 -// CHECK: %[[E20:.*]] = tuple_extract %[[L2]] : $(Builtin.Int64, Builtin.Int64), 0 -// CHECK: %[[E21:.*]] = tuple_extract %[[L2]] : $(Builtin.Int64, Builtin.Int64), 1 -// CHECK: %[[RET:.*]] = tuple (%[[E10]] : $Builtin.Int64, %[[E11]] : $Builtin.Int64, %[[E20]] : $Builtin.Int64, %[[E21]] : $Builtin.Int64, %[[R0]] : $Builtin.Int64, %[[R1]] : $Builtin.Int64) -// CHECK: return %[[RET]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64, Builtin.Int64) -// CHECK-LABEL: } // end sil function 'f091_callTuple' -sil @f091_callTuple : $@convention(thin) (Int) -> (Int, Int, Int, Int, Int, Int) { -bb0(%0: $Int): - %1 = tuple (%0 : $Int, %0 : $Int) - %2 = function_ref @f090_tupletuple : $@convention(thin) ((Int, Int), Int) -> (@out (Int, Int), @out (Int, Int), Int, Int) - %3 = apply %2(%1, %0) : $@convention(thin) ((Int, Int), Int) -> (@out (Int, Int), @out (Int, Int), Int, Int) - %9 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 0 - %10 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 1 - %11 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 2 - 
%12 = tuple_extract %3 : $((Int, Int), (Int, Int), Int, Int), 3 - %13 = tuple_extract %9 : $(Int, Int), 0 - %14 = tuple_extract %9 : $(Int, Int), 1 - %15 = tuple_extract %10 : $(Int, Int), 0 - %16 = tuple_extract %10 : $(Int, Int), 1 - %17 = tuple (%13 : $Int, %14 : $Int, %15 : $Int, %16 : $Int, %11 : $Int, %12 : $Int) - return %17 : $(Int, Int, Int, Int, Int, Int) -} - -// CHECK-LABEL: sil hidden @f100_any : $@convention(thin) (@in Any) -> () { -// CHECK: bb0(%0 : $*Any): -// CHECK: destroy_addr %0 : $*Any -// CHECK: %[[T:.*]] = tuple () -// CHECK: return %[[T]] : $() -// CHECK-LABEL: } // end sil function 'f100_any' -sil hidden @f100_any : $@convention(thin) (@in Any) -> () { -bb0(%0 : $Any): - debug_value %0 : $Any, let, name "any", argno 1 - destroy_value %0 : $Any - %3 = tuple () - return %3 : $() -} - -// CHECK-LABEL: sil @f101_passAny : $@convention(thin) (@in T) -> () { -// CHECK: bb0(%0 : $*T): -// CHECK: %[[A:.*]] = alloc_stack $Any -// CHECK: %[[F:.*]] = function_ref @f100_any : $@convention(thin) (@in Any) -> () -// CHECK: %[[T:.*]] = init_existential_addr %[[A]] : $*Any, $T -// CHECK: copy_addr %0 to [initialization] %[[T]] : $*T -// CHECK: %{{.*}} = apply %[[F]](%[[A]]) : $@convention(thin) (@in Any) -> () -// CHECK: destroy_addr %0 : $*T -// CHECK: %[[R:.*]] = tuple () -// CHECK: dealloc_stack %[[A]] : $*Any -// CHECK: return %[[R]] : $() -// CHECK-LABEL: } // end sil function 'f101_passAny' -sil @f101_passAny : $@convention(thin) (@in T) -> () { -bb0(%0 : $T): - %2 = function_ref @f100_any : $@convention(thin) (@in Any) -> () - %3 = copy_value %0 : $T - %4 = init_existential_value %3 : $T, $T, $Any - %5 = apply %2(%4) : $@convention(thin) (@in Any) -> () - destroy_value %0 : $T - %7 = tuple () - return %7 : $() -} diff --git a/test/SILOptimizer/copy_propagation_opaque.sil b/test/SILOptimizer/copy_propagation_opaque.sil index 25e184ff83ba2..8fda26d12dfcf 100644 --- a/test/SILOptimizer/copy_propagation_opaque.sil +++ 
b/test/SILOptimizer/copy_propagation_opaque.sil @@ -12,7 +12,7 @@ // // REQUIRES: asserts -sil_stage canonical +sil_stage raw import Builtin import Swift diff --git a/test/SILOptimizer/opaque_values_mandatory.sil b/test/SILOptimizer/opaque_values_mandatory.sil index 91a998b4211e7..7a5128afc550f 100644 --- a/test/SILOptimizer/opaque_values_mandatory.sil +++ b/test/SILOptimizer/opaque_values_mandatory.sil @@ -4,6 +4,9 @@ // RUN: -enable-ossa-modules -enable-copy-propagation \ // RUN: -enable-lexical-borrow-scopes | \ // RUN: %FileCheck %s +// +// These tests assume that opaque values are not lowered until OSSA lowering. +// REQUIRES: enable_opaque_values import Builtin @@ -33,7 +36,7 @@ bb0(%0 : @owned $T): // CHECK: destroy_value %1 : $T // CHECK: return %{{.*}} : $() // CHECK-LABEL: } // end sil function 'f020_assign_inout' -sil hidden [ossa] @f020_assign_inout : $@convention(thin) (@inout T, @in T) -> () { +sil [ossa] @f020_assign_inout : $@convention(thin) (@inout T, @in T) -> () { bb0(%0 : $*T, %1 : @owned $T): %2 = copy_value %1 : $T assign %2 to %0 : $*T @@ -51,20 +54,18 @@ bb0(%0 : $*T, %1 : @owned $T): // Note: the tuple construction is simplified away. 
// CHECK: return %2 : $(Builtin.Int64, Builtin.Int64, Builtin.Int64) // CHECK-LABEL: } // end sil function 'f030_callMultiResult' -sil @f030_callMultiResult : $@convention(thin) (Int) -> (Int, Int, Int) { +sil [ossa] @f030_callMultiResult : $@convention(thin) (Int) -> (Int, Int, Int) { bb0(%0 : $Int): %1 = function_ref @f040_multiResult : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) %2 = apply %1(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) - %3 = tuple_extract %2 : $(Int, Int, Int), 0 - %4 = tuple_extract %2 : $(Int, Int, Int), 1 - %5 = tuple_extract %2 : $(Int, Int, Int), 2 + (%3, %4, %5) = destructure_tuple %2 : $(Int, Int, Int) %6 = tuple (%3 : $Int, %4 : $Int, %5 : $Int) return %6 : $(Int, Int, Int) } // Test returning an opaque tuple of tuples. // --- -// CHECK-LABEL: sil hidden [noinline] @f040_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { +// CHECK-LABEL: sil [noinline] @f040_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { // CHECK: bb0(%0 : $T): // CHECK: %1 = copy_value %0 : $T // CHECK: %2 = copy_value %0 : $T @@ -73,8 +74,8 @@ bb0(%0 : $Int): // CHECK: %5 = tuple (%1 : $T, %2 : $T, %3 : $T) // CHECK: return %5 : $(T, T, T) // CHECK-LABEL: } // end sil function 'f040_multiResult' -sil hidden [noinline] @f040_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { -bb0(%0 : $T): +sil [noinline] [ossa] @f040_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { +bb0(%0 : @owned $T): %2 = copy_value %0 : $T %3 = copy_value %0 : $T %4 = copy_value %0 : $T diff --git a/test/SILOptimizer/opaque_values_opt.sil b/test/SILOptimizer/opaque_values_opt.sil index f1618f2db9d76..430f157d2ba1f 100644 --- a/test/SILOptimizer/opaque_values_opt.sil +++ b/test/SILOptimizer/opaque_values_opt.sil @@ -1,9 +1,12 @@ // RUN: %target-sil-opt -O -enable-sil-opaque-values -emit-sorted-sil %s | %FileCheck %s -// REQUIRES: 
atrick-to-look-at +// +// These tests assume that opaque values are lowered in the +// optimization pipeline. They are currently only lowered in raw sil. +// +// REQUIRES: enable_opaque_values import Builtin -// CHECK-LABEL: sil_stage canonical sil_stage canonical public typealias Int = Builtin.Int64 @@ -12,12 +15,12 @@ public typealias Int = Builtin.Int64 // CHECK: bb0(%0 : $Builtin.Int64): // CHECK: return %0 : $Builtin.Int64 // CHECK: } // end sil function '$s20f010_genericIdentityBi64__Tg5' -sil hidden [noinline] @f010_genericIdentity : $@convention(thin) (@in T) -> @out T { -bb0(%0 : $T): +sil hidden [noinline] [ossa] @f010_genericIdentity : $@convention(thin) (@in T) -> @out T { +bb0(%0 : @owned $T): return %0 : $T } -sil @f015_callGeneric : $@convention(thin) (Builtin.Int64) -> Builtin.Int64 { +sil [ossa] @f015_callGeneric : $@convention(thin) (Builtin.Int64) -> Builtin.Int64 { bb0(%0 : $Builtin.Int64): %2 = function_ref @f010_genericIdentity : $@convention(thin) <τ_0_0> (@in τ_0_0) -> @out τ_0_0 %3 = apply %2(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> @out τ_0_0 @@ -37,8 +40,8 @@ bb0(%0 : $Builtin.Int64): // CHECK: %5 = tuple (%1 : $T, %2 : $T, %3 : $T) // CHECK: return %5 : $(T, T, T) // CHECK-LABEL: } // end sil function 'f020_multiResult' -sil hidden [noinline] @f020_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { -bb0(%0 : $T): +sil hidden [noinline] [ossa] @f020_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { +bb0(%0 : @owned $T): %2 = copy_value %0 : $T %3 = copy_value %0 : $T %4 = copy_value %0 : $T @@ -56,7 +59,7 @@ bb0(%0 : $T): // CHECK: %2 = apply %1(%0) : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, @out Builtin.Int64, @out Builtin.Int64) // CHECK: return %2 : $(Builtin.Int64, Builtin.Int64, Builtin.Int64) // CHECK-LABEL: } // end sil function 'f030_callMultiResult' -sil @f030_callMultiResult : $@convention(thin) (Int) -> (Int, Int, Int) { +sil [ossa] @f030_callMultiResult : 
$@convention(thin) (Int) -> (Int, Int, Int) { bb0(%0 : $Int): %1 = function_ref @f020_multiResult : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) %2 = apply %1(%0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) diff --git a/test/SILOptimizer/specialize_opaque.sil b/test/SILOptimizer/specialize_opaque.sil index 97f93f58fbf47..a3ffb78d75ad0 100644 --- a/test/SILOptimizer/specialize_opaque.sil +++ b/test/SILOptimizer/specialize_opaque.sil @@ -1,6 +1,6 @@ // RUN: %target-sil-opt -enable-sil-opaque-values -enable-sil-verify-all -generic-specializer %s | %FileCheck %s -sil_stage canonical +sil_stage raw import Builtin diff --git a/test/SILOptimizer/specialize_opaque_ossa.sil b/test/SILOptimizer/specialize_opaque_ossa.sil index 447e5e1b576b5..36a6690fda7fa 100644 --- a/test/SILOptimizer/specialize_opaque_ossa.sil +++ b/test/SILOptimizer/specialize_opaque_ossa.sil @@ -1,6 +1,6 @@ // RUN: %target-sil-opt -enable-sil-opaque-values -enable-sil-verify-all -generic-specializer %s | %FileCheck %s -sil_stage canonical +sil_stage raw import Builtin diff --git a/test/sil-passpipeline-dump/basic.test-sh b/test/sil-passpipeline-dump/basic.test-sh index 1aab22a8fce32..750e697688c91 100644 --- a/test/sil-passpipeline-dump/basic.test-sh +++ b/test/sil-passpipeline-dump/basic.test-sh @@ -1,7 +1,7 @@ // RUN: %sil-passpipeline-dumper -Onone | %FileCheck %s // CHECK: --- -// CHECK: name: non-Diagnostic Enabling Mandatory Optimizations +// CHECK: name: Non-Diagnostic Mandatory Optimizations // CHECK: passes: [ "for-each-loop-unroll", "mandatory-combine", // CHECK: "mandatory-arc-opts" ] // CHECK: --- From b187ba0dde1b3e070a2ad4d3abfd1e74e94d2c65 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 1 Mar 2022 13:22:42 -0800 Subject: [PATCH 05/88] Add support for indirect tuple-type results. This could happen as a result of specialization or concrete address-only values. For now, it's just tested by SIL unit tests. 
--- include/swift/SIL/ApplySite.h | 8 +-- .../Mandatory/AddressLowering.cpp | 67 +++++++++++-------- lib/SILOptimizer/Mandatory/AddressLowering.h | 6 +- lib/SILOptimizer/Utils/Generics.cpp | 4 +- test/SILOptimizer/address_lowering.sil | 57 ++++++++++++++++ 5 files changed, 104 insertions(+), 38 deletions(-) diff --git a/include/swift/SIL/ApplySite.h b/include/swift/SIL/ApplySite.h index 14a8abb965c0e..898b11150dac9 100644 --- a/include/swift/SIL/ApplySite.h +++ b/include/swift/SIL/ApplySite.h @@ -581,9 +581,9 @@ class FullApplySite : public ApplySite { } /// Get the SIL value that represents all of the given call's results. For a - /// single direct result, returns the result. For multiple results, returns a - /// fake tuple value. The tuple has no storage of its own. The real results - /// must be extracted from it. + /// single direct result, returns the actual result. For multiple results, + /// returns a pseudo-result tuple. The tuple has no storage of its own. The + /// real results must be extracted from it. /// /// For ApplyInst, returns the single-value instruction itself. /// @@ -592,7 +592,7 @@ class FullApplySite : public ApplySite { /// For BeginApplyInst, returns an invalid value. For coroutines, there is no /// single value representing all results. Yielded values are generally /// handled differently since they have the convention of incoming arguments. - SILValue getPseudoResult() const { + SILValue getResult() const { switch (getKind()) { case FullApplySiteKind::ApplyInst: return SILValue(cast(getInstruction())); diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 81efd270a1f6c..1e8065c1d9599 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -174,14 +174,19 @@ cleanupAfterCall(FullApplySite apply, // that lowers values to storage. 
//===----------------------------------------------------------------------===// -/// If \p pseudoResult has multiple results, return the destructure. -static DestructureTupleInst *getCallMultiResult(SILValue pseudoResult) { - if (pseudoResult->getType().is()) { - if (auto *use = pseudoResult->getSingleUse()) - return cast(use->getUser()); +/// If \p pseudoResult represents multiple results and at least one result is +/// used, then return the destructure. +static DestructureTupleInst *getCallDestructure(FullApplySite apply) { + if (apply.getSubstCalleeConv().getNumDirectSILResults() == 1) + return nullptr; - assert(pseudoResult->use_empty() && "pseudo result can't be used"); - } + SILValue pseudoResult = apply.getResult(); + assert(pseudoResult->getType().is()); + if (auto *use = pseudoResult->getSingleUse()) + return cast(use->getUser()); + + assert(pseudoResult->use_empty() + && "pseudo result can only be used by a single destructure_tuple"); return nullptr; } @@ -205,19 +210,18 @@ static bool visitCallResults(FullApplySite apply, llvm::function_ref visitor) { auto fnConv = apply.getSubstCalleeConv(); - SILValue pseudoResult = apply.getPseudoResult(); - if (auto *destructure = getCallMultiResult(pseudoResult)) { + if (auto *destructure = getCallDestructure(apply)) { return visitCallMultiResults(destructure, fnConv, visitor); } - return visitor(pseudoResult, *fnConv.getDirectSILResults().begin()); + return visitor(apply.getResult(), *fnConv.getDirectSILResults().begin()); } /// Return true if the given value is either a "fake" tuple that represents all /// of a call's results or an empty tuple of no results. This may return true /// for either tuple_inst or a block argument. 
static bool isPseudoCallResult(SILValue value) { - if (isa(value)) - return value->getType().is(); + if (auto *apply = dyn_cast(value)) + return ApplySite(apply).getSubstCalleeConv().getNumDirectSILResults() > 1; auto *bbArg = dyn_cast(value); if (!bbArg) @@ -227,11 +231,18 @@ static bool isPseudoCallResult(SILValue value) { if (!term) return false; - return isa(term) && bbArg->getType().is(); + auto *tryApply = dyn_cast(term); + if (!tryApply) + return false; + + return ApplySite(tryApply).getSubstCalleeConv().getNumDirectSILResults() > 1; } /// Return true if this is a pseudo-return value. static bool isPseudoReturnValue(SILValue value) { + if (value->getFunction()->getConventions().getNumDirectSILResults() < 2) + return false; + if (auto *tuple = dyn_cast(value)) { Operand *singleUse = tuple->getSingleUse(); return singleUse && isa(singleUse->getUser()); @@ -261,9 +272,12 @@ static SILValue getTupleStorageValue(Operand *operand) { if (!singleUse || !isa(singleUse->getUser())) return tuple; + SILFunction *function = tuple->getFunction(); + if (function->getConventions().getNumDirectSILResults() < 2) + return tuple; + unsigned resultIdx = tuple->getElementIndex(operand); - SILFunction *function = tuple->getFunction(); auto loweredFnConv = getLoweredFnConv(function); assert(loweredFnConv.getResults().size() == tuple->getElements().size()); @@ -279,11 +293,11 @@ static SILValue getTupleStorageValue(Operand *operand) { /// Return the value representing storage for a single return value. /// -/// bb0(%loweredIndirectResult : $*T, ...) +/// bb0(%loweredIndirectResult : $*T, ...) 
// function entry /// return %oper /// /// For %oper, return %loweredIndirectResult -static SILValue getSingleReturnValue(Operand *operand) { +static SILValue getSingleReturnAddress(Operand *operand) { assert(!isPseudoReturnValue(operand->get())); auto *function = operand->getParentFunction(); @@ -612,7 +626,7 @@ void OpaqueValueVisitor::visitValue(SILValue value) { // Canonicalize returned values. // -// Given: +// Given $() -> @out (T, T): // %t = def : $(T, T) // use %t : $(T, T) // return %t : $(T, T) @@ -807,7 +821,7 @@ static SILValue getProjectedUseValue(Operand *operand) { // Return instructions can project into the return value. case SILInstructionKind::ReturnInst: - return getSingleReturnValue(operand); + return getSingleReturnAddress(operand); } return SILValue(); } @@ -1420,9 +1434,7 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, } case SILInstructionKind::TupleInst: { auto *tupleInst = cast(user); - // Function return values. - if (tupleInst->hasOneUse() - && isa(tupleInst->use_begin()->getUser())) { + if (isPseudoReturnValue(tupleInst)) { unsigned resultIdx = tupleInst->getElementIndex(operand); assert(resultIdx < pass.loweredFnConv.getNumIndirectSILResults()); // Cannot call getIndirectSILResults here because that API uses the @@ -1830,9 +1842,8 @@ void ApplyRewriter::convertApplyWithIndirectResults() { // Populate newCallArgs. makeIndirectArgs(newCallArgs); - // Record the original results before potentially removing the apply - // (try_apply is removed during rewriting). - auto *destructure = getCallMultiResult(apply.getPseudoResult()); + // Record the original result destructure before deleting a try_apply. 
+ auto *destructure = getCallDestructure(apply); switch (apply.getKind()) { case FullApplySiteKind::ApplyInst: { @@ -2071,7 +2082,7 @@ void ApplyRewriter::rewriteTryApply(ArrayRef newCallArgs) { tryApply->getNormalBB(), tryApply->getErrorBB(), tryApply->getApplyOptions(), tryApply->getSpecializationInfo()); - auto *resultArg = cast(apply.getPseudoResult()); + auto *resultArg = cast(apply.getResult()); auto replaceTermResult = [&](SILValue newResultVal) { SILType resultTy = loweredCalleeConv.getSILResultType(typeCtx); @@ -2091,8 +2102,6 @@ void ApplyRewriter::rewriteTryApply(ArrayRef newCallArgs) { // Handle a single opaque result value. if (pass.valueStorageMap.contains(resultArg)) { - assert(!resultArg->getType().is()); - // Storage was materialized by materializeIndirectResultAddress. auto &origStorage = pass.valueStorageMap.getStorage(resultArg); assert(origStorage.isRewritten); @@ -2142,7 +2151,7 @@ void ApplyRewriter::rewriteTryApply(ArrayRef newCallArgs) { // // no uses of %d1, %d2 // void ApplyRewriter::replaceDirectResults(DestructureTupleInst *oldDestructure) { - SILValue newPseudoResult = apply.getPseudoResult(); + SILValue newPseudoResult = apply.getResult(); DestructureTupleInst *newDestructure = nullptr; if (loweredCalleeConv.getNumDirectSILResults() > 1) { @@ -2950,7 +2959,7 @@ static void rewriteIndirectApply(FullApplySite apply, ApplyRewriter(apply, pass).convertApplyWithIndirectResults(); if (!apply.getInstruction()->isDeleted()) { - assert(!getCallMultiResult(apply.getPseudoResult()) + assert(!getCallDestructure(apply) && "replaceDirectResults deletes the destructure"); pass.deleter.forceDelete(apply.getInstruction()); } diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.h b/lib/SILOptimizer/Mandatory/AddressLowering.h index e4c6ae79b0c2e..c4f8a99bfe25f 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.h +++ b/lib/SILOptimizer/Mandatory/AddressLowering.h @@ -38,13 +38,13 @@ namespace swift { /// After allocation, before materialization 
or rewriting, we may have: /// /// %result_addr = alloc_stack // storage for %result -/// %result = apply () -> @out T +/// %result = apply : $() -> @out T /// %extract = struct_extact %result // def-projection of %result /// /// Or, a projection may project into a composing use (use-projection): /// -/// %struct_addr = alloc_stack // storage for %struct -/// %result = apply () -> @out T // use-projection of %struct at operand #0 +/// %struct_addr = alloc_stack // storage for %struct +/// %result = apply : $() -> @out T // use-projection of %struct at operand #0 /// %struct = struct %result /// /// A phi-projection is a use projection that projects its entire value diff --git a/lib/SILOptimizer/Utils/Generics.cpp b/lib/SILOptimizer/Utils/Generics.cpp index 7f4d0160fce49..ecd63f31cd909 100644 --- a/lib/SILOptimizer/Utils/Generics.cpp +++ b/lib/SILOptimizer/Utils/Generics.cpp @@ -2242,7 +2242,7 @@ SILFunction *ReabstractionThunkGenerator::createThunk() { Arguments.push_back(NewArg); } FullApplySite ApplySite = createReabstractionThunkApply(Builder); - SILValue ReturnValue = ApplySite.getPseudoResult(); + SILValue ReturnValue = ApplySite.getResult(); assert(ReturnValue && "getPseudoResult out of sync with ApplySite?!"); Builder.createReturn(Loc, ReturnValue); @@ -2255,7 +2255,7 @@ SILFunction *ReabstractionThunkGenerator::createThunk() { FullApplySite ApplySite = createReabstractionThunkApply(Builder); - SILValue ReturnValue = ApplySite.getPseudoResult(); + SILValue ReturnValue = ApplySite.getResult(); assert(ReturnValue && "getPseudoResult out of sync with ApplySite?!"); if (ReturnValueAddr) { diff --git a/test/SILOptimizer/address_lowering.sil b/test/SILOptimizer/address_lowering.sil index afcbbfb965fad..5ee49ed89b145 100644 --- a/test/SILOptimizer/address_lowering.sil +++ b/test/SILOptimizer/address_lowering.sil @@ -79,6 +79,63 @@ bb0(%0 : @owned $T): return %0 : $T } +// This could happen as a result of either partial specialization from +// a single type 
parameter into a generic tuple, or specialization +// from a single type parameter into a tuple of concrete address-only +// types. +// +// CHECK-LABEL: sil [ossa] @f011_identity_tuple : $@convention(thin) (@in (T, T)) -> @out (T, T) { +// CHECK: bb0(%0 : $*(T, T), %1 : $*(T, T)): +// CHECK: copy_addr [take] %1 to [initialization] %0 : $*(T, T) +// CHECK-LABEL: } // end sil function 'f011_identity_tuple' +sil [ossa] @f011_identity_tuple : $@convention(thin) (@in (T, T)) -> @out (T, T) { +bb0(%0 : @owned $(T, T)): + return %0 : $(T, T) +} + +// CHECK-LABEL: sil [ossa] @f012_decompose_tuple_arg : $@convention(thin) (@in (T, T)) -> @out (T, T) { +// CHECK: bb0(%0 : $*(T, T), %1 : $*(T, T)): +// CHECK: [[ARG0:%.*]] = tuple_element_addr %1 : $*(T, T), 0 +// CHECK: [[ARG1:%.*]] = tuple_element_addr %1 : $*(T, T), 1 +// CHECK: [[RET0:%.*]] = tuple_element_addr %0 : $*(T, T), 0 +// CHECK: apply %{{.*}}([[RET0]], [[ARG0]]) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> @out τ_0_0 +// CHECK: [[RET1:%.*]] = tuple_element_addr %0 : $*(T, T), 1 +// CHECK: copy_addr [take] [[ARG1]] to [initialization] [[RET1]] : $*T +// CHECK-LABEL: } // end sil function 'f012_decompose_tuple_arg' +sil [ossa] @f012_decompose_tuple_arg : $@convention(thin) (@in (T, T)) -> @out (T, T) { +bb0(%0 : @owned $(T, T)): + (%arg0, %arg1) = destructure_tuple %0 : $(T, T) + %f = function_ref @f010_addrlower_identity : $@convention(thin) (@in T) -> @out T + %call = apply %f(%arg0) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> @out τ_0_0 + %result = tuple (%call : $T, %arg1 : $T) + return %result : $(T, T) +} + +// CHECK-LABEL: sil [ossa] @f013_pass_tuple_arg : $@convention(thin) (@in T) -> @out T { +// CHECK: bb0(%0 : $*T, %1 : $*T): +// CHECK: [[IN:%.*]] = alloc_stack $(T, T) +// CHECK: [[OUT:%.*]] = alloc_stack $(T, T) +// CHECK: [[IN1:%.*]] = tuple_element_addr [[IN]] : $*(T, T), 1 +// CHECK: copy_addr %1 to [initialization] [[IN1]] : $*T +// CHECK: [[IN0:%.*]] = tuple_element_addr %2 : $*(T, T), 0 +// CHECK: 
copy_addr [take] %1 to [initialization] [[IN0]] : $*T +// CHECK: apply %{{.*}}([[OUT]], [[IN]]) : $@convention(thin) <τ_0_0> (@in (τ_0_0, τ_0_0)) -> @out (τ_0_0, τ_0_0) +// CHECK: [[RET:%.*]] = tuple_element_addr [[OUT]] : $*(T, T), 0 +// CHECK: [[DEAD:%.*]] = tuple_element_addr [[OUT]] : $*(T, T), 1 +// CHECK: destroy_addr [[DEAD]] : $*T +// CHECK: copy_addr [take] [[RET]] to [initialization] %0 : $*T +// CHECK-LABEL: } // end sil function 'f013_pass_tuple_arg' +sil [ossa] @f013_pass_tuple_arg : $@convention(thin) (@in T) -> @out T { +bb0(%0 : @owned $T): + %copy0 = copy_value %0 : $T + %arg = tuple (%0 : $T, %copy0 : $T) + %f = function_ref @f011_identity_tuple : $@convention(thin) (@in (T, T)) -> @out (T, T) + %call = apply %f(%arg) : $@convention(thin) (@in (T, T)) -> @out (T, T) + (%call0, %call1) = destructure_tuple %call : $(T, T) + destroy_value %call1 : $T + return %call0 : $T +} + // CHECK-LABEL: sil [ossa] @f020_multiResult : $@convention(thin) (@in T) -> (@out T, @out T, @out T) { // CHECK: %0 "$return_value" // CHECK: %1 "$return_value" From f4176b91110a52cbf287686b92e8c4e364286446 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Mon, 7 Mar 2022 23:11:19 -0800 Subject: [PATCH 06/88] [SIL-opaque] Code review suggestions Mostly documentation and typos. --- docs/SIL.rst | 20 ++++ lib/SIL/Verifier/SILVerifier.cpp | 16 +++- .../Mandatory/AddressLowering.cpp | 91 +++++++++---------- lib/SILOptimizer/Mandatory/AddressLowering.h | 47 ++++++---- 4 files changed, 101 insertions(+), 73 deletions(-) diff --git a/docs/SIL.rst b/docs/SIL.rst index abc26102a908c..722dbc63ea7ce 100644 --- a/docs/SIL.rst +++ b/docs/SIL.rst @@ -2193,6 +2193,26 @@ parts:: return %1 : $Klass } +Forwarding Address-Only Values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Address-only values are potentially unmovable when borrowed. 
This +means that they cannot be forwarded with guaranteed ownership unless +the forwarded value has the same representation as in the original +value and can reuse the same storage. Non-destructive projection is +allowed, such as `struct_extract`. Aggregation, such as `struct`, and +destructive disaggregation, such as `switch_enum`, are not allowed. This +is an invariant for OSSA with opaque SIL values for these reasons: + +1. To avoid implicit semantic copies. For move-only values, this allows +complete diagnostics. And in general, it makes it impossible for SIL +passes to "accidentally" create copies. + +2. To reuse borrowed storage. This allows the optimizer to share the same +storage for multiple exclusive reads of the same variable, avoiding +copies. It may also be necessary to support native Swift atomics, which +will be unmovable-when-borrowed. + Borrowed Object based Safe Interior Pointers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/lib/SIL/Verifier/SILVerifier.cpp b/lib/SIL/Verifier/SILVerifier.cpp index 51f1e635da8c4..8863f2ebd4c59 100644 --- a/lib/SIL/Verifier/SILVerifier.cpp +++ b/lib/SIL/Verifier/SILVerifier.cpp @@ -1008,6 +1008,13 @@ class SILVerifier : public SILVerifierBase { auto *TI = predBB->getTerminator(); if (F.hasOwnership()) { require(isa(TI), "All phi inputs must be branch operands."); + + // Address-only values are potentially unmovable when borrowed. See also + // checkOwnershipForwardingInst. A phi implies a move of its arguments + // because they can't necessarily all reuse the same storage. + require((!arg->getType().isAddressOnly(F) + || arg->getOwnershipKind() != OwnershipKind::Guaranteed), + "Guaranteed address-only phi not allowed--implies a copy"); } else { // FIXME: when critical edges are removed and cond_br arguments are // disallowed, only allow BranchInst. 
@@ -1269,10 +1276,11 @@ class SILVerifier : public SILVerifierBase { checkOwnershipForwardingTermInst(term); } - // Address-only values are potentially move-only, and unmovable if they are - // borrowed. Ensure that guaranteed address-only values are forwarded with - // the same representation. Non-destructive projection is - // allowed. Aggregation and destructive disaggregation is not allowed. + // Address-only values are potentially unmovable when borrowed. Ensure that + // guaranteed address-only values are forwarded with the same + // representation. Non-destructive projection is allowed. Aggregation and + // destructive disaggregation is not allowed. See SIL.rst, Forwarding + // Address-Only Values. if (ownership == OwnershipKind::Guaranteed && OwnershipForwardingMixin::isAddressOnly(i)) { require(OwnershipForwardingMixin::hasSameRepresentation(i), diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 1e8065c1d9599..983b87bd73766 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -14,21 +14,23 @@ /// memory locations such as a stack locations. This is mandatory for IRGen. /// /// Lowering to LLVM IR requires each SILValue's type to be a valid "SIL storage -/// type". Opaque SILValues have address-only types. Address-only values require -/// indirect storage in LLVM, so their SIL storage type must be an address type. +/// type". Opaque SILValues have address-only types. These require indirect +/// storage in LLVM, so their SIL storage type must be an address type. /// -/// This pass should not introduce any semantic copies. Guaranteed values always -/// reuse the borrowed value's storage. This means that we SIL cannot allow -/// guaranteed opaque uses unless they are projections of the definition. In -/// particular, borrowed structs, tuples, and enums of address-only types are -/// not allowed. 
+/// This pass never creates copies except to replace explicit value copies +/// (copy_value, load [copy], store). For move-only values, this allows complete +/// diagnostics. And in general, it makes it impossible for SIL passes to +/// "accidentally" create copies. /// -/// When owned values are consumed by phis, multiple storage locations are -/// required to avoid interfering with other phi operands. However, the value -/// never needs to be live in multiple storage locations a once. When the value -/// is consumed by a phi, either it's own storage is coalesced with the phi -/// storage (they have the same address), or the value is bitwise moved into the -/// phi's storage. +/// This pass inserts moves (copy_addr [take] [initialize]) of owned values to +/// - compose aggregates +/// - resolve phi interference +/// +/// For guaranteed values, this pass inserts neither copies nor moves. Opaque +/// values are potentially unmovable when borrowed. This means that guaranteed +/// address-only aggregates and phis are prohibited. This SIL invariant is +/// enforced by SILVerifier::checkOwnershipForwardingInst() and +/// SILVerifier::visitSILPhiArgument(). /// /// ## Step #1: Map opaque values /// @@ -58,7 +60,8 @@ /// during rewriting. /// /// After allocating storage for all non-phi opaque values, phi storage is -/// allocated. This is handled by a PhiStorageOptimizer that checks for +/// allocated. (Phi values are block arguments in which phi's arguments are +/// branch operands). This is handled by a PhiStorageOptimizer that checks for /// interference among the phi operands and reuses storage allocated to other /// values. /// @@ -169,7 +172,7 @@ cleanupAfterCall(FullApplySite apply, // Calls are currently SILValues, but when the result type is a tuple, the call // value does not represent a real value with storage. 
This is a bad situation // for address lowering because there's no way to tell from any given value -// whether its legal to assign storage to that value. As a result, the +// whether it's legal to assign storage to that value. As a result, the // implementation of call lowering doesn't fall out naturally from the algorithm // that lowers values to storage. //===----------------------------------------------------------------------===// @@ -218,7 +221,7 @@ visitCallResults(FullApplySite apply, /// Return true if the given value is either a "fake" tuple that represents all /// of a call's results or an empty tuple of no results. This may return true -/// for either tuple_inst or a block argument. +/// for either an apply instruction or a block argument. static bool isPseudoCallResult(SILValue value) { if (auto *apply = dyn_cast(value)) return ApplySite(apply).getSubstCalleeConv().getNumDirectSILResults() > 1; @@ -255,7 +258,7 @@ static bool isPseudoReturnValue(SILValue value) { /// the tuple is a pseudo-return value, return the indirect function argument /// for the corresponding result after lowering. /// -/// bb0(%loweredIndirectResult : $*T, ...) +/// bb0(..., %loweredIndirectResult : $*T, ...) /// .... /// %tuple = tuple(..., %operand, ...) /// return %tuple @@ -268,16 +271,12 @@ static bool isPseudoReturnValue(SILValue value) { /// (see insertIndirectReturnArgs()). 
static SILValue getTupleStorageValue(Operand *operand) { auto *tuple = cast(operand->getUser()); - Operand *singleUse = tuple->getSingleUse(); - if (!singleUse || !isa(singleUse->getUser())) - return tuple; - - SILFunction *function = tuple->getFunction(); - if (function->getConventions().getNumDirectSILResults() < 2) + if (!isPseudoReturnValue(tuple)) return tuple; unsigned resultIdx = tuple->getElementIndex(operand); + auto *function = tuple->getFunction(); auto loweredFnConv = getLoweredFnConv(function); assert(loweredFnConv.getResults().size() == tuple->getElements().size()); @@ -286,14 +285,14 @@ static SILValue getTupleStorageValue(Operand *operand) { if (loweredFnConv.isSILIndirect(result)) ++indirectResultIdx; } - // Cannot call F->getIndirectSILResults here because that API uses the + // Cannot call function->getIndirectSILResults here because that API uses the // function conventions before address lowering. return function->getArguments()[indirectResultIdx]; } /// Return the value representing storage for a single return value. /// -/// bb0(%loweredIndirectResult : $*T, ...) // function entry +/// bb0(..., %loweredIndirectResult : $*T, ...) // function entry /// return %oper /// /// For %oper, return %loweredIndirectResult @@ -301,9 +300,7 @@ static SILValue getSingleReturnAddress(Operand *operand) { assert(!isPseudoReturnValue(operand->get())); auto *function = operand->getParentFunction(); - auto loweredFnConv = getLoweredFnConv(function); - assert(loweredFnConv.getNumIndirectSILResults() == 1); - (void)loweredFnConv; + assert(getLoweredFnConv(function).getNumIndirectSILResults() == 1); // Cannot call getIndirectSILResults here because that API uses the // function conventions before address lowering. 
@@ -331,7 +328,7 @@ static bool isStoreCopy(SILValue value) { return isa(user) || isa(user); } -ValueStorage &ValueStorageMap::insertValue(SILValue value) { +void ValueStorageMap::insertValue(SILValue value, SILValue storageAddress) { assert(!stableStorage && "cannot grow stable storage map"); auto hashResult = @@ -339,9 +336,7 @@ ValueStorage &ValueStorageMap::insertValue(SILValue value) { (void)hashResult; assert(hashResult.second && "SILValue already mapped"); - valueVector.emplace_back(value, ValueStorage()); - - return valueVector.back().storage; + valueVector.emplace_back(value, ValueStorage(storageAddress)); } void ValueStorageMap::replaceValue(SILValue oldValue, SILValue newValue) { @@ -409,7 +404,7 @@ struct AddressLoweringState { SmallBlotSetVector indirectApplies; // All function-exiting terminators (return or throw instructions). - SmallVector exitingInsts; + SmallVector exitingInsts; // Copies from a phi's operand storage to the phi storage. These logically // occur on the CFG edge. Keep track of them to resolve anti-dependencies. @@ -462,7 +457,7 @@ struct AddressLoweringState { /// Before populating the ValueStorageMap, replace each value-typed argument to /// the current function with an address-typed argument by inserting a temporary /// load instruction. -static void convertIndirectFunctionArgs(AddressLoweringState &pass) { +static void convertDirectToIndirectFunctionArgs(AddressLoweringState &pass) { // Insert temporary argument loads at the top of the function. SILBuilder argBuilder = pass.getBuilder(pass.function->getEntryBlock()->begin()); @@ -490,9 +485,7 @@ static void convertIndirectFunctionArgs(AddressLoweringState &pass) { // Indirect calling convention may be used for loadable types. In that // case, generating the argument loads is sufficient. 
if (addrType.isAddressOnly(*pass.function)) { - auto &storage = pass.valueStorageMap.insertValue(loadArg); - storage.storageAddress = arg; - storage.isRewritten = true; + pass.valueStorageMap.insertValue(loadArg, arg); } } ++argIdx; @@ -520,10 +513,9 @@ static unsigned insertIndirectReturnArgs(AddressLoweringState &pass) { argIdx, bodyResultTy.getAddressType(), OwnershipKind::None, var); // Insert function results into valueStorageMap so that the caller storage // can be projected onto values inside the function as use projections. - auto &storage = pass.valueStorageMap.insertValue(funcArg); + // // This is the only case where a value defines its own storage. - storage.storageAddress = funcArg; - storage.isRewritten = true; + pass.valueStorageMap.insertValue(funcArg, funcArg); ++argIdx; } @@ -621,10 +613,11 @@ void OpaqueValueVisitor::visitValue(SILValue value) { pass.valueStorageMap.getStorage(value).storageAddress)); return; } - pass.valueStorageMap.insertValue(value); + pass.valueStorageMap.insertValue(value, SILValue()); } -// Canonicalize returned values. +// Canonicalize returned values. For multiple direct results, the operand of the +// return instruction must be a tuple with no other uses. // // Given $() -> @out (T, T): // %t = def : $(T, T) @@ -688,7 +681,7 @@ void OpaqueValueVisitor::canonicalizeReturnValues() { /// function. static void prepareValueStorage(AddressLoweringState &pass) { // Fixup this function's argument types with temporary loads. - convertIndirectFunctionArgs(pass); + convertDirectToIndirectFunctionArgs(pass); // Create a new function argument for each indirect result. insertIndirectReturnArgs(pass); @@ -2012,7 +2005,7 @@ void ApplyRewriter::rewriteApply(ArrayRef newCallArgs) { // Replace \p tryApply with a new try_apply using \p newCallArgs. 
// -// If the old result was a single address-only value, then create and return a +// If the old result was a single opaque value, then create and return a // fake load that takes its place in the storage map. Otherwise, return an // invalid SILValue. // @@ -3056,8 +3049,8 @@ static void removeOpaquePhis(SILBasicBlock *bb, AddressLoweringState &pass) { } } -// Instructions that use an address-only value without producing one are already -// deleted. The rest of the address-only definitions are now removed bottom-up +// Instructions that use an opaque value without producing one are already +// deleted. The rest of the opaque definitions are now removed bottom-up // by visiting valuestorageMap. // // Phis are removed here after all other instructions. @@ -3145,12 +3138,12 @@ void AddressLowering::runOnFunction(SILFunction *function) { // ## Step #1: Map opaque values // // First, rewrite this function's arguments and return values, then populate - // pass.valueStorageMap with an entry for each address-only value. + // pass.valueStorageMap with an entry for each opaque value. prepareValueStorage(pass); // ## Step #2: Allocate storage // - // For each address-only value mapped in step #1, either create an + // For each opaque value mapped in step #1, either create an // alloc_stack/dealloc_stack pair, or mark its ValueStorage entry as a // def-projection out of its operand's def or a use projection into its // composing use or into a phi (branch operand). @@ -3162,7 +3155,7 @@ void AddressLowering::runOnFunction(SILFunction *function) { // ## Step #3. Rewrite opaque values // - // Rewrite all instructions that either define or use an address-only value. + // Rewrite all instructions that either define or use an opaque value. // Creates new '_addr' variants of instructions, obtaining the storage // address from the 'valueStorageMap'. This materializes projections in // forward order, setting 'storageAddress' for each projection as it goes. 
diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.h b/lib/SILOptimizer/Mandatory/AddressLowering.h index c4f8a99bfe25f..c75f893385ae0 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.h +++ b/lib/SILOptimizer/Mandatory/AddressLowering.h @@ -17,18 +17,20 @@ namespace swift { -/// Track a value's storage. Stages in the storage life-cycle: +/// Track an opaque value's storage. An opaque value is a SILValue with +/// address-only type. Stages in the storage life-cycle: /// /// 1. Unallocated /// -/// 2. Allocated. Either (a) 'storageAddress' is an alloc_stack, or (b) -/// 'projectedStorageID' refers to a different ValueStorage, which recursively -/// leads to a valid 'storageAddress'. +/// 2. Allocated. Either (a) it is a root value where 'storageAddress' is an +/// alloc_stack, or (b) it is a projection where 'projectedStorageID' refers to +/// the parent ValueStorage, which recursively leads to a root value with a +/// valid 'storageAddress'. /// /// 3. Materialized. 'storageAddress' is valid. Address projections have been /// emitted at the point that this value is defined. /// -/// 4. Rewritten. The definition of this address-only value is fully translated +/// 4. Rewritten. The definition of this opaque value is fully translated /// into lowered SIL. Instructions are typically materialized and rewritten at /// the same time. A indirect result, however, is materialized as soon as its /// alloc_stack is emitted, but only rewritten once the call itself is @@ -45,19 +47,23 @@ namespace swift { /// /// %struct_addr = alloc_stack // storage for %struct /// %result = apply : $() -> @out T // use-projection of %struct at operand #0 -/// %struct = struct %result +/// %struct = struct (%result) /// /// A phi-projection is a use projection that projects its entire value /// through a phi rather than into a composing use. It has an invalid -/// 'projectedOperandNum'. 
+/// 'projectedOperandNum': /// -/// Operations that destructively resuse storage (open_existential_value, +/// %result = apply : $() -> @out T // use-projection of %phi +/// br bb1(%result) +/// bb1(%phi : @owned $T) +/// +/// Operations that destructively reuse storage (open_existential_value, /// unchecked_enum_data, and switch_enum), are not considered storage /// projections. Instead, these values have no ValueStorage but are rewritten to /// directly reuse their operand's storage. /// /// To materialize projections, address lowering follows the original def-use -/// edges for address-only values. Consequently, values that have storage cannot +/// edges for opaque values. Consequently, values that have storage cannot /// be removed from SIL or from the storage map until rewriting is /// complete. Mapped values can, however, be substituted on-the-fly by emitting /// a place-holder value and updating the map entry. This works because the @@ -76,11 +82,11 @@ struct ValueStorage { /// When either isDefProjection or isUseProjection is set, this refers to the /// storage whose "def" this value projects out of or whose operand this /// storage projects into via its "use. - uint32_t projectedStorageID; + uint32_t projectedStorageID = InvalidID; /// For use-projections, identifies the operand index of the composing use. /// Only valid for non-phi use projections. - uint16_t projectedOperandNum; + uint16_t projectedOperandNum = InvalidOper; /// Projection out of a storage def. e.g. this value is a destructure. unsigned isDefProjection : 1; @@ -96,16 +102,17 @@ struct ValueStorage { // across phis, which would result in piecewise initialization. 
unsigned initializesEnum : 1; - ValueStorage() { clear(); } - - void clear() { - storageAddress = SILValue(); - projectedStorageID = InvalidID; - projectedOperandNum = InvalidOper; - isUseProjection = false; + ValueStorage(SILValue storageAddress): storageAddress(storageAddress) { isDefProjection = false; + isUseProjection = false; isRewritten = false; initializesEnum = false; + + // The initial storage address is only valid when the value is effectively + // already rewritten. + if (storageAddress) { + isRewritten = true; + } } bool isAllocated() const { @@ -251,10 +258,10 @@ class ValueStorageMap { /// Insert a value in the map, creating a ValueStorage object for it. This /// must be called in RPO order. - ValueStorage &insertValue(SILValue value); + void insertValue(SILValue value, SILValue storageAddress); /// Replace a value that is mapped to storage with another value. This allows - /// limited rewritting of original address-only values. For example, block + /// limited rewriting of original opaque values. For example, block /// arguments can be replaced with fake loads in order to rewrite their /// corresponding terminator. void replaceValue(SILValue oldValue, SILValue newValue); From c4e167f48f74b9df3d7a405f98ecc9c806f1a55b Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 8 Mar 2022 17:31:38 -0800 Subject: [PATCH 07/88] Fix alloc_stack placement for open_existential. Compute the latestOpeningInst, not the firstOpeningInst. --- lib/SILOptimizer/Mandatory/AddressLowering.cpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 983b87bd73766..eaff802dbf136 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1129,8 +1129,9 @@ createStackAllocation(SILValue value) { // For opened existential types, allocate stack space at the type // definition. 
Allocating as early as possible provides more opportunity for - // creating use projections into value. - SILInstruction *firstOpeningInst = nullptr; + // creating use projections into value. But allocation must be no earlier than + // the latest type definition. + SILInstruction *latestOpeningInst = nullptr; allocTy.getASTType().visit([&](CanType type) { auto archetype = dyn_cast(type); if (!archetype) @@ -1142,15 +1143,15 @@ createStackAllocation(SILValue value) { auto *openingInst = openingVal->getDefiningInstruction(); assert(openingVal && "all opened archetypes should be resolved"); - if (firstOpeningInst - && pass.domInfo->dominates(firstOpeningInst, openingInst)) { + if (latestOpeningInst + && pass.domInfo->dominates(openingInst, latestOpeningInst)) { return; } - firstOpeningInst = openingInst; + latestOpeningInst = openingInst; } }); - auto allocPt = firstOpeningInst ? std::next(firstOpeningInst->getIterator()) - : pass.function->begin()->begin(); + auto allocPt = latestOpeningInst ? std::next(latestOpeningInst->getIterator()) + : pass.function->begin()->begin(); auto allocBuilder = pass.getBuilder(allocPt); AllocStackInst *alloc = allocBuilder.createAllocStack(pass.genLoc(), allocTy); @@ -1158,7 +1159,7 @@ createStackAllocation(SILValue value) { auto deallocBuilder = pass.getBuilder(insertPt); deallocBuilder.createDeallocStack(pass.genLoc(), alloc); }; - if (firstOpeningInst) { + if (latestOpeningInst) { // Deallocate at the dominance frontier to ensure that allocation encloses // not only the uses of the current value, but also of any values reusing // this storage as a use projection. From 71175f711555aca7c9e8ed70a8e5479aaa90d581 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 8 Mar 2022 17:32:13 -0800 Subject: [PATCH 08/88] Rename "phi copy" to "phi move" for consistency with documentation. In classic compiler terminology, this is a "phi copy" algorithm. But the documentation now tries to clearly distinguish between "semantic copies" vs. 
moves, where moves are "storage copies". --- .../Mandatory/AddressLowering.cpp | 98 +++++++++---------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index eaff802dbf136..086f51e477a51 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -406,8 +406,7 @@ struct AddressLoweringState { // All function-exiting terminators (return or throw instructions). SmallVector exitingInsts; - // Copies from a phi's operand storage to the phi storage. These logically - // occur on the CFG edge. Keep track of them to resolve anti-dependencies. + // Handle moves from a phi's operand storage to the phi storage. std::unique_ptr phiRewriter; AddressLoweringState(SILFunction *function, DominanceInfo *domInfo) @@ -758,10 +757,11 @@ static Operand *getProjectedDefOperand(SILValue value) { } } -/// Return the operand of the reused storage. These operations are always -/// rewritten by the use rewriter and destructively reuse their operand's -/// storage. If the result is address-only, then the operand must be -/// address-only (otherwise, the operand would not necessarilly have storage). +/// If \p value is an existential or enum, then return the existential or enum +/// operand. These operations are always rewritten by the UseRewriter and always +/// destructively reuse the same storage as their operand. Note that if the +/// operation's result is address-only, then the operand must be address-only +/// and therefore must be mapped to ValueStorage. static Operand *getReusedStorageOperand(SILValue value) { switch (value->getKind()) { default: @@ -785,7 +785,7 @@ static Operand *getReusedStorageOperand(SILValue value) { } /// If \p operand can project into its user, return the SILValue representing -/// user's storage. 
The user may compose an aggregate from its operands or /// forwards its operands to arguments. /// /// TODO: Handle SwitchValueInst, CheckedCastValueBranchInst. @@ -1446,7 +1446,7 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, //===----------------------------------------------------------------------===// // PhiRewriter // -// Insert copies on CFG edges to break phi operand interferences. +// Insert moves on CFG edges to break phi operand interferences. //===----------------------------------------------------------------------===// namespace { @@ -1456,18 +1456,18 @@ namespace { // 1. Materialize the phi address. If the phi projects into a use, this requires // initialization of the user's storage in each predecessor. // -// 2. If the phi operand is not coalesced, then copy the operand into the +// 2. If the phi operand is not coalesced, then move the operand into the // materialized phi address. // -// For blocks with multiple phis, all copies of phi operands semantically occur +// For blocks with multiple phis, all moves of phi operands semantically occur // in parallel on the CFG edge from the predecessor to the phi block. As these -// copies are inserted into the predecessor's intruction list, maintain the -// illusion of parallel copies by resolving any interference between the phi -// copies. This is done by checking for anti-dependencies to or from other phi -// copies. If one phi copy's source reads from another phi copy's dest, then the +// moves are inserted into the predecessor's instruction list, maintain the +// illusion of parallel moves by resolving any interference between the phi +// moves. This is done by checking for anti-dependencies to or from other phi +// moves. If one phi move's source reads from another phi move's dest, then the // read must occur before the write. 
// -// Insert a second copy to break an anti-dependence cycle when both the source +// Insert a second move to break an anti-dependence cycle when both the source // and destination of the new phi interferes with other phis (the classic // phi-swap problem). // @@ -1486,18 +1486,18 @@ namespace { // br bb3(val0, val1) // bb2: // temp = alloc_stack -// copy_addr addr0 to temp -// copy_addr addr1 to addr0 -// copy_addr temp to addr1 +// copy_addr [take] addr0 to [initialization] temp +// copy_addr [take] addr1 to [initialization] addr0 +// copy_addr [take] temp to [initialization] addr1 // dealloc_stack temp // br bb3(val1, val1) // bb3(phi0, phi1): class PhiRewriter { AddressLoweringState &pass; - // A set of copies from a phi operand storage to phi storage. These logically + // A set of moves from a phi operand storage to phi storage. These logically // occur on the CFG edge. Keep track of them to resolve anti-dependencies. - SmallPtrSet phiCopies; + SmallPtrSet phiMoves; public: PhiRewriter(AddressLoweringState &pass) : pass(pass) {} @@ -1508,18 +1508,18 @@ class PhiRewriter { PhiRewriter(const PhiRewriter &) = delete; PhiRewriter &operator=(const PhiRewriter &) = delete; - CopyAddrInst *createPhiCopy(SILBuilder &builder, SILValue from, SILValue to) { - auto *copy = builder.createCopyAddr(pass.genLoc(), from, to, IsTake, + CopyAddrInst *createPhiMove(SILBuilder &builder, SILValue from, SILValue to) { + auto *move = builder.createCopyAddr(pass.genLoc(), from, to, IsTake, IsInitialization); - phiCopies.insert(copy); - return copy; + phiMoves.insert(move); + return move; } - struct CopyPosition { - SILBasicBlock::iterator latestCopyPos; + struct MovePosition { + SILBasicBlock::iterator latestMovePos; bool foundAntiDependenceCycle = false; }; - CopyPosition findPhiCopyPosition(PhiOperand phiOper); + MovePosition findPhiMovePosition(PhiOperand phiOper); }; } // anonymous namespace @@ -1529,15 +1529,15 @@ void PhiRewriter::materializeOperand(PhiOperand phiOper) { if 
(operStorage.isPhiProjection()) { if (operStorage.projectedStorageID == pass.valueStorageMap.getOrdinal(phiOper.getValue())) { - // This operand was coalesced with this particular phi. No copy needed. + // This operand was coalesced with this particular phi. No move needed. return; } } auto phiOperAddress = operStorage.getMaterializedAddress(); - auto copyPos = findPhiCopyPosition(phiOper); + auto movePos = findPhiMovePosition(phiOper); - auto builder = pass.getBuilder(copyPos.latestCopyPos); + auto builder = pass.getBuilder(movePos.latestMovePos); AddressMaterialization addrMat(pass, builder); auto &phiStorage = pass.valueStorageMap.getStorage(phiOper.getValue()); @@ -1545,16 +1545,16 @@ void PhiRewriter::materializeOperand(PhiOperand phiOper) { addrMat.materializeUseProjectionStorage(phiStorage, /*intoPhiOperand*/ true); - if (!copyPos.foundAntiDependenceCycle) { - createPhiCopy(builder, phiOperAddress, phiAddress); + if (!movePos.foundAntiDependenceCycle) { + createPhiMove(builder, phiOperAddress, phiAddress); return; } AllocStackInst *alloc = builder.createAllocStack(pass.genLoc(), phiOper.getValue()->getType()); - createPhiCopy(builder, phiOperAddress, alloc); + createPhiMove(builder, phiOperAddress, alloc); auto tempBuilder = pass.getBuilder(phiOper.getBranch()->getIterator()); - createPhiCopy(tempBuilder, alloc, phiAddress); + createPhiMove(tempBuilder, alloc, phiAddress); tempBuilder.createDeallocStack(pass.genLoc(), alloc); } @@ -1565,9 +1565,9 @@ PhiRewriter &AddressLoweringState::getPhiRewriter() { return *(this->phiRewriter.get()); } -// Return the latest position at which a copy into this phi may be emitted -// without violating an anti-dependence on another phi copy. -PhiRewriter::CopyPosition PhiRewriter::findPhiCopyPosition(PhiOperand phiOper) { +// Return the latest position at which a move into this phi may be emitted +// without violating an anti-dependence on another phi move. 
+PhiRewriter::MovePosition PhiRewriter::findPhiMovePosition(PhiOperand phiOper) { auto phiBaseAddress = pass.valueStorageMap.getBaseStorage(phiOper.getValue()).storageAddress; @@ -1578,34 +1578,34 @@ PhiRewriter::CopyPosition PhiRewriter::findPhiCopyPosition(PhiOperand phiOper) { auto insertPt = phiOper.getBranch()->getIterator(); bool foundEarliestInsertPoint = false; - CopyPosition copyPos; - copyPos.latestCopyPos = insertPt; + MovePosition movePos; + movePos.latestMovePos = insertPt; - // Continue scanning until all phi copies have been checked for interference. + // Continue scanning until all phi moves have been checked for interference. for (auto beginIter = phiOper.predBlock->begin(); insertPt != beginIter;) { --insertPt; - auto *phiCopy = dyn_cast(&*insertPt); - if (!phiCopy || !phiCopies.contains(phiCopy)) + auto *phiMove = dyn_cast(&*insertPt); + if (!phiMove || !phiMoves.contains(phiMove)) break; if (!foundEarliestInsertPoint - && getAccessBase(phiCopy->getSrc()) == phiBaseAddress) { - // Anti-dependence from the phi copy to the phi value. Do not copy into + && getAccessBase(phiMove->getSrc()) == phiBaseAddress) { + // Anti-dependence from the phi move to the phi value. Do not move into // the phi storage before this point. foundEarliestInsertPoint = true; } - if (getAccessBase(phiCopy->getDest()) == operBaseAddress) { - // Anti-dependence from the phi operand to the phi copy. Do not copy out + if (getAccessBase(phiMove->getDest()) == operBaseAddress) { + // Anti-dependence from the phi operand to the phi move. Do not move out // of the operand storage after this point. - copyPos.latestCopyPos = insertPt; + movePos.latestMovePos = insertPt; // If the earliest and latest points conflict, allocate a temporary. 
if (foundEarliestInsertPoint) { - copyPos.foundAntiDependenceCycle = true; + movePos.foundAntiDependenceCycle = true; } } } - return copyPos; + return movePos; } //===----------------------------------------------------------------------===// From d50c8f67ee5ad4be8cebb16ba4af7e08f793e177 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 10:04:52 -0800 Subject: [PATCH 09/88] [SIL-opaque] avoid handling operands past 64k --- lib/SILOptimizer/Mandatory/AddressLowering.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 086f51e477a51..6c7325b6e007d 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -840,7 +840,11 @@ void ValueStorageMap::recordComposingUseProjection(Operand *oper, auto &storage = getStorage(oper->get()); assert(!storage.isAllocated()); storage.projectedStorageID = getOrdinal(userValue); + storage.projectedOperandNum = oper->getOperandNumber(); + assert(storage.projectedOperandNum == oper->getOperandNumber() && + "operand overflow"); + storage.isUseProjection = true; if (EnumDecl *enumDecl = userValue->getType().getEnumOrBoundGenericEnum()) { @@ -1011,6 +1015,10 @@ bool OpaqueStorageAllocation::findProjectionIntoUseImpl( assert(!getProjectedDefOperand(userValue) && "storage cannot project in two directions."); + // Avoid handling preposterous types. + if (use->getOperandNumber() > UINT16_MAX) + continue; + // Recurse through all storage projections to find the uniquely allocated // storage. Enum storage cannot be reused across multiple subobjects because // it must be initialized via a single init_enum_data_addr instruction. 
From fd45bd01921f83c7e5d8fde8cb3a283c561fd20c Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 11:50:13 -0800 Subject: [PATCH 10/88] [SIL-opaque] More file-level documentation Explain high-level objectives and terminology with more precision. --- .../Mandatory/AddressLowering.cpp | 64 +++++++++++++++---- 1 file changed, 51 insertions(+), 13 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 6c7325b6e007d..e8f8c90d2f060 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -19,37 +19,75 @@ /// /// This pass never creates copies except to replace explicit value copies /// (copy_value, load [copy], store). For move-only values, this allows complete -/// diagnostics. And in general, it makes it impossible for SIL passes to +/// diagnostics. And in general, this makes it impossible for SIL passes to /// "accidentally" create copies. /// /// This pass inserts moves (copy_addr [take] [initialize]) of owned values to /// - compose aggregates /// - resolve phi interference /// -/// For guarantee values, this pass inserts neither copies nor moves. Opaque +/// For guaranteed values, this pass inserts neither copies nor moves. Opaque /// values are potentially unmovable when borrowed. This means that guaranteed /// address-only aggregates and phis are prohibited. This SIL invariant is /// enforced by SILVerifier::checkOwnershipForwardingInst() and /// SILVerifier::visitSILPhiArgument(). /// +/// The simplest approach to address lowering is to map each opaque SILValue to +/// a separate alloc_stack. This pass avoids doing that in the following cases: +/// +/// 1. Reused-storage: Some operations are guaranteed to reuse their operand's +/// storage. This includes extracting an enum payload and opening an existential +/// value. This is required to avoid introducing new copies or moves. 
+/// +/// // %data's storage must reuse storage allocated for %enum +/// %data = unchecked_enum_data %enum : $Optional, #Optional.some!enumelt +/// +/// 2. Def-projection: Some operations are guaranteed to directly project out of +/// their operand's storage. This is also required to avoid introducing new +/// copies or moves. Unlike reused-storage, such projections are non-destructive +/// and repeatable. +/// +/// // %field's storage is part of the storage allocated for %struct +/// %field = struct_extract %struct, #field +/// +/// 3. Use-projection: Operations that compose aggregates may optionally allow +/// their operands to project into the storage allocated for their result. This +/// is only an optimization but is essential for reasonable code generation. +/// +/// // %field's storage may be part of the storage allocated for %struct +/// %struct = struct(..., %field, ...) +/// +/// 4. Phi-projection: Phi's may optionally allow their (branch) operands to +/// reuse the storage allocated for their result (block argument). This is only +/// an optimization, but is important to avoid many useless moves: +/// +/// // %arg's storage may be part of the storage allocated for %phi +/// br bb(%arg) +/// bb(%phi : @owned $T) +/// +/// The algorithm proceeds as follows: +/// /// ## Step #1: Map opaque values /// /// Populate a map from each opaque SILValue to its ValueStorage in forward /// order (RPO). Each opaque value is mapped to an ordinal ID representing the /// storage. Storage locations can now be optimized by remapping the values. /// +/// Reused-storage operations are not mapped to ValueStorage. +/// /// ## Step #2: Allocate storage /// /// In reverse order (PO), allocate the parent storage object for each opaque /// value. /// -/// If the value is a subobject extraction (struct_extract, tuple_extract, -/// open_existential_value, unchecked_enum_data), then mark the value's storage -/// as a projection from the def's storage. 
+/// Handle def-projection: If the value is a subobject extraction +/// (struct_extract, tuple_extract, open_existential_value, +/// unchecked_enum_data), then mark the value's storage as a projection from the +/// def's storage. /// -/// If the value's use composes a parent object from this value (struct, tuple, -/// enum), and the use's storage dominates this value, then mark the value's -/// storage as a projection into the use's storage. +/// Handle use-projection: If the value's use composes a parent object from this +/// value (struct, tuple, enum), and the use's storage dominates this value, +/// then mark the value's storage as a projection into the use's storage. /// /// ValueStorage projections can be chained. A non-projection ValueStorage is /// the root of a tree of projections. @@ -59,11 +97,11 @@ /// projections are not mapped to a `storageAddress` at this point. That happens /// during rewriting. /// -/// After allocating storage for all non-phi opaque values, phi storage is -/// allocated. (Phi values are block arguments in which phi's arguments are -/// branch operands). This is handled by a PhiStorageOptimizer that checks for -/// interference among the phi operands and reuses storage allocated to other -/// values. +/// Handle phi-projection: After allocating storage for all non-phi opaque +/// values, phi storage is allocated. (Phi values are block arguments in which +/// phi's arguments are branch operands). This is handled by a +/// PhiStorageOptimizer that checks for interference among the phi operands and +/// reuses storage allocated to other values. /// /// ## Step #3. 
Rewrite opaque values /// From 355ca16897e678bc914381711f111fd3d0ba2b87 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 11:52:51 -0800 Subject: [PATCH 11/88] [SIL-opaque] Add section-level comment explaining storage allocation --- lib/SILOptimizer/Mandatory/AddressLowering.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index e8f8c90d2f060..ba2c6c6aee025 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -860,7 +860,9 @@ static SILValue getProjectedUseValue(Operand *operand) { //===----------------------------------------------------------------------===// // OpaqueStorageAllocation // -// Generate alloc_stack and address projections for abstract storage locations. +// For each ValueStorage, first determine whether it can project out of its +// definition's storage or into the storage of a use. If so, record the +// projection information. Otherwise emit an alloc_stack for this storage root. 
// ===---------------------------------------------------------------------===// // Record a storage projection from the source of the given operand into its From ef2bf97a14ccdbf149825113da8ead4cd7ac8dbd Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 11:53:34 -0800 Subject: [PATCH 12/88] [SIL-opaque] remove cleanupAfterCall helper --- .../Mandatory/AddressLowering.cpp | 49 ++++++------------- 1 file changed, 14 insertions(+), 35 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index ba2c6c6aee025..d80978aae9d99 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -176,30 +176,6 @@ static SILFunctionConventions getLoweredCallConv(ApplySite call) { SILModuleConventions::getLoweredAddressConventions(call.getModule())); } -/// Invoke \p cleanup on all paths exiting a call. -static void -cleanupAfterCall(FullApplySite apply, - llvm::function_ref cleanup) { - switch (apply.getKind()) { - case FullApplySiteKind::ApplyInst: { - cleanup(std::next(apply.getInstruction()->getIterator())); - break; - } - case FullApplySiteKind::TryApplyInst: { - auto *tryApply = cast(apply.getInstruction()); - cleanup(tryApply->getNormalBB()->begin()); - cleanup(tryApply->getErrorBB()->begin()); - break; - } - case FullApplySiteKind::BeginApplyInst: { - // FIXME: Unimplemented - // - // This should be as simple as calling cleanup for all the end_applies. 
- llvm::report_fatal_error("Unimplemented coroutine"); - } - } -} - //===----------------------------------------------------------------------===// // Multi-Result // @@ -482,6 +458,12 @@ struct AddressLoweringState { builder.setCurrentDebugScope(originalInst->getDebugScope()); return builder; } + + void prepareBuilder(SILBuilder &builder) { + builder.setSILConventions( + SILModuleConventions::getLoweredAddressConventions( + builder.getModule())); + }; }; } // end anonymous namespace @@ -1733,22 +1715,20 @@ void CallArgRewriter::rewriteIndirectArgument(Operand *operand) { if (apply.getArgumentConvention(*operand).isOwnedConvention()) { argBuilder.createTrivialStoreOr(apply.getLoc(), argValue, allocInst, StoreOwnershipQualifier::Init); - cleanupAfterCall(apply, [&](SILBasicBlock::iterator insertPt) { - auto deallocBuilder = pass.getBuilder(insertPt); - deallocBuilder.createDeallocStack(callLoc, allocInst); + apply.insertAfterFullEvaluation([&](SILBuilder &callBuilder) { + callBuilder.createDeallocStack(callLoc, allocInst); }); } else { auto borrow = argBuilder.emitBeginBorrowOperation(callLoc, argValue); auto *storeInst = argBuilder.emitStoreBorrowOperation(callLoc, borrow, allocInst); - cleanupAfterCall(apply, [&](SILBasicBlock::iterator insertPt) { - auto cleanupBuilder = pass.getBuilder(insertPt); + apply.insertAfterFullEvaluation([&](SILBuilder &callBuilder) { if (auto *storeBorrow = dyn_cast(storeInst)) { - cleanupBuilder.emitEndBorrowOperation(callLoc, storeBorrow); + callBuilder.emitEndBorrowOperation(callLoc, storeBorrow); } - cleanupBuilder.emitEndBorrowOperation(callLoc, borrow); - cleanupBuilder.createDeallocStack(callLoc, allocInst); + callBuilder.emitEndBorrowOperation(callLoc, borrow); + callBuilder.createDeallocStack(callLoc, allocInst); }); } } @@ -2020,9 +2000,8 @@ SILValue ApplyRewriter::materializeIndirectResultAddress(SILValue oldResult, // Instead of using resultBuilder, insert dealloc immediately after the call // for stack discpline across 
loadable indirect results. - cleanupAfterCall(apply, [&](SILBasicBlock::iterator insertPt) { - auto cleanupBuilder = pass.getBuilder(insertPt); - cleanupBuilder.createDeallocStack(callLoc, allocInst); + apply.insertAfterFullEvaluation([&](SILBuilder &callBuilder) { + callBuilder.createDeallocStack(callLoc, allocInst); }); if (oldResult && !oldResult->use_empty()) { From f79c6c853c1c05340c2dd0d599eaffaf54724763 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 21:22:07 -0800 Subject: [PATCH 13/88] [SIL-opaque] add a test case for phi coalescing Avoid attempting to coalesce enum payloads. --- .../Mandatory/PhiStorageOptimizer.cpp | 19 ++++++----- test/SILOptimizer/address_lowering_phi.sil | 32 +++++++++++++++++++ 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp index 4e4af608679a3..545daab64d090 100644 --- a/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp +++ b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp @@ -116,7 +116,6 @@ void PhiStorageOptimizer::optimize() { coalescedPhi.coalescedOperands.push_back(phi.getOperand(predecessor)); return; } - occupiedBlocks.insert(phi.phiBlock); for (auto *incomingPred : phi.phiBlock->getPredecessorBlocks()) { tryCoalesceOperand(incomingPred); } @@ -145,14 +144,15 @@ bool PhiStorageOptimizer::canCoalesceValue(SILValue incomingVal) { auto &incomingStorage = valueStorageMap.getStorage(incomingVal); - // If the incoming use is pre-allocated it can't be coalesced. - // This also handles incoming values that are already coalesced with - // another use. + // If the incoming use directly reuses its def storage, projects out of its + // def storage, or is pre-allocated, then it can't be coalesced. When incoming + // storage is directly reused, isAllocated() is false. isProjection() covers + // the other cases. 
// // Coalescing use projections from incomingVal into its other non-phi uses - // would require by recursively following uses across projections when - // computing liveness. - if (incomingStorage.isProjection()) + // could be handled, but would require recursively following uses across + // projections when computing liveness. + if (!incomingStorage.isAllocated() || incomingStorage.isProjection()) return false; auto *defInst = incomingVal->getDefiningInstruction(); @@ -163,7 +163,6 @@ bool PhiStorageOptimizer::canCoalesceValue(SILValue incomingVal) { // analysis of the whole phi web before coalescing phi operands. return false; } - assert(incomingStorage.isAllocated() && "nonphi must be allocated"); // Don't coalesce an incoming value unless it's storage is from a stack // allocation, which can be replaced with another alloc_stack. @@ -213,7 +212,11 @@ bool PhiStorageOptimizer::recordUseLiveness(SILValue incomingVal, for (auto *use : incomingVal->getUses()) { StackList liveBBWorklist(getFunction()); + // If \p liveBB is already occupied by another value, return + // false. Otherwise, mark \p liveBB live and push it onto liveBBWorklist. 
auto visitLiveBlock = [&](SILBasicBlock *liveBB) { + assert(liveBB != phi.phiBlock && "phi operands are consumed"); + if (occupiedBlocks.contains(liveBB)) return false; diff --git a/test/SILOptimizer/address_lowering_phi.sil b/test/SILOptimizer/address_lowering_phi.sil index a7ec7e882ac8b..5df8244fc7f4a 100644 --- a/test/SILOptimizer/address_lowering_phi.sil +++ b/test/SILOptimizer/address_lowering_phi.sil @@ -10,6 +10,11 @@ typealias AnyObject = Builtin.AnyObject typealias Int = Builtin.Int64 typealias Bool = Builtin.Int1 +enum Optional { + case none + case some(T) +} + struct SRef { @_hasStorage var object: AnyObject { get set } @_hasStorage var element: T { get set } @@ -438,3 +443,30 @@ bb6(%phi6 : @owned $InnerStruct): %outer = struct $OuterStruct (%phi6 : $InnerStruct, %3 : $AnyObject) return %outer : $OuterStruct } + +// CHECK-LABEL: sil [ossa] @f090_payloadPhiOperand : $@convention(thin) (@in Optional, @in T) -> @out T { +// CHECK: bb0(%0 : $*T, %1 : $*Optional, %2 : $*T): +// CHECK: cond_br undef, bb2, bb1 +// CHECK: bb1: +// CHECK: destroy_addr %2 : $*T +// CHECK: [[P:%.*]] = unchecked_take_enum_data_addr %1 : $*Optional, #Optional.some!enumelt +// CHECK: copy_addr [take] [[P]] to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK: bb2: +// CHECK: destroy_addr %1 : $*Optional +// CHECK: copy_addr [take] %2 to [initialization] %0 : $*T +// CHECK: br bb3 +// CHECK-LABEL: } // end sil function 'f090_payloadPhiOperand' +sil [ossa] @f090_payloadPhiOperand : $@convention(thin) (@in Optional, @in T) -> @out T { +bb0(%0 : @owned $Optional, %1 : @owned $T): + cond_br undef, bb2, bb1 +bb1: + destroy_value %1 : $T + %payload = unchecked_enum_data %0 : $Optional, #Optional.some!enumelt + br bb3(%payload : $T) +bb2: + destroy_value %0 : $Optional + br bb3(%1 : $T) +bb3(%phi : @owned $T): + return %phi : $T +} From e116998090575e8cda058f73ba9d800c9c70823d Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 21:24:37 -0800 Subject: [PATCH 14/88] 
[SIL-opaque] in-depth top-level documentation for phi coalescing. --- .../Mandatory/PhiStorageOptimizer.cpp | 57 ++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp index 545daab64d090..bd3e3defd644f 100644 --- a/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp +++ b/lib/SILOptimizer/Mandatory/PhiStorageOptimizer.cpp @@ -13,11 +13,66 @@ /// PhiStorageOptimizer implements an analysis used by AddressLowering /// to reuse storage across block arguments. /// +/// In OSSA, phi operands can often be coalesced because they are +/// consuming--they end the lifetime of their operand. This optimization may +/// fail to coalesce an operand for two major reasons: +/// +/// 1. This phi operand is already coalesced with other storage, possibly of a +/// different type: +/// +/// %field = struct_extract %struct : $Struct, #field +/// br bb(%field : $T) +/// +/// bb(%phi : @owned $T): +/// ... +/// +/// 2. This phi operand interferes with another coalesced phi operand. +/// +/// Only one of the call results below, either %get0 or %get1, can be coalesced +/// with %phi. The %phi will itself be coalesced with this function's indirect +/// @out argument. +/// +/// sil [ossa] @function : $@convention(thin) () -> @out T { +/// bb0: +/// %get0 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 +/// %get1 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 +/// cond_br undef, bb2, bb1 +/// +/// bb1: +/// destroy_value %get0 : $T +/// br bb3(%get1 : $T) +/// +/// bb2: +/// destroy_value %get1 : $T +/// br bb3(%get0 : $T) +/// +/// bb3(%phi : @owned $T): +/// return %phi : $T +/// +/// TODO: Liveness is currently recorded at the block level. This could be +/// extended to handle operand with nonoverlapping liveness in the same +/// block. 
In this case, %get0 and %get1 could both be coalesced with a bit of +/// extra book-keeping: +/// +/// bb0: +/// %get0 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 +/// +/// bb1: +/// destroy_value %get0 : $T +/// %get1 = apply %get() : $@convention(thin) <τ_0_0>() -> @out τ_0_0 +/// br bb3(%get1 : $T) +/// +/// bb2: +/// br bb3(%get0 : $T) +/// +/// bb3(%phi : @owned $T): +/// /// TODO: This does not yet coalesce the copy_value instructions that produce a /// phi operand. Such a copy implies that both the operand and phi value are /// live past the phi. Nonetheleses, they could still be coalesced as /// follows... First coalesce all direct phi operands. Then transitively -/// coalesce copies by redoing the liveness traversal from the uses of the copy. +/// coalesce copies by checking if the copy's source is coalescable, then +/// redoing the liveness traversal from the uses of the copy. /// /// TODO: This approach uses on-the-fly liveness discovery for all incoming /// values at once. It requires no storage for liveness. Hopefully this is From 7f88908402c8b569b46b3c2080020056d07d9fa6 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 22:00:22 -0800 Subject: [PATCH 15/88] [SIL-opaque] minor NFC review feedback --- lib/SILOptimizer/Mandatory/AddressLowering.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index d80978aae9d99..8f4db85ea42f3 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1092,8 +1092,7 @@ checkStorageDominates(AllocStackInst *allocInst, void OpaqueStorageAllocation::allocatePhi(PhiValue phi) { // Coalesces phi operand storage with the phi storage. The algorithm processes - // all incoming values at once, so it is is run when visiting the block - // argument. 
+ // all incoming values at once, so it is run when visiting the block argument. // // The phi operand projections are computed first to give them priority. Then // we determine if the phi itself can share storage with one of its users. @@ -1143,17 +1142,12 @@ createStackAllocation(SILValue value) { assert(value.getOwnershipKind() != OwnershipKind::Guaranteed && "creating storage for a guaranteed value implies a copy"); -#ifndef NDEBUG // Instructions that produce an opened type never reach here because they // have guaranteed ownership--they project their storage. We reach this // point after the opened value has been copied. - if (auto *defInst = value->getDefiningInstruction()) { - if (auto *singleValue = dyn_cast(defInst)) { - assert(!cast(defInst)->getDefinedOpenedArchetype() - && "owned open_existential is unsupported"); - } - } -#endif + assert((!isa(value) + || !cast(value)->getDefinedOpenedArchetype()) + && "owned open_existential is unsupported"); SILType allocTy = value->getType(); From 72817dfd705efcb8afdbbd0758b4f671a886badc Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 22:16:08 -0800 Subject: [PATCH 16/88] [SIL-opaque] rename materialization functions. 
--- .../Mandatory/AddressLowering.cpp | 37 +++++++++++-------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 8f4db85ea42f3..99fc3f8bcef1f 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1231,7 +1231,7 @@ class AddressMaterialization { return storage.storageAddress; if (storage.isUseProjection) { - materializeUseProjectionStorage(storage, /*intoPhiOperand*/ false); + recursivelyMaterializeStorage(storage, /*intoPhiOperand*/ false); } else { assert(storage.isDefProjection); storage.storageAddress = materializeDefProjection(origValue); @@ -1241,8 +1241,8 @@ class AddressMaterialization { void initializeOperand(Operand *operand); - SILValue materializeUseProjectionStorage(ValueStorage &storage, - bool intoPhiOperand); + SILValue recursivelyMaterializeStorage(ValueStorage &storage, + bool intoPhiOperand); SILValue materializeDefProjection(SILValue origValue); @@ -1257,8 +1257,8 @@ class AddressMaterialization { SILValue materializeComposingUser(SingleValueInstruction *user, bool intoPhiOperand) { - return materializeUseProjectionStorage( - pass.valueStorageMap.getStorage(user), intoPhiOperand); + return recursivelyMaterializeStorage(pass.valueStorageMap.getStorage(user), + intoPhiOperand); } }; } // anonymous namespace @@ -1290,19 +1290,26 @@ void AddressMaterialization::initializeOperand(Operand *operand) { StoreOwnershipQualifier::Init); } -// Recursively materialize the address for storage at the point that a use -// projects into it via either a composing-use (struct, tuple, enum) or phi -// projection. This only materializes the address that the operands project -// into. It does not materialize the storage for the result. e.g. it -// materializes init_enum_data_addr, not inject_enum_addr. 
+// Recursively materialize the address for storage at the point that an operand +// may project into it via either a composing-use (struct, tuple, enum) or phi +// projection. +// +// Precondition: \p storage is not a def-projection. // // If \p intoPhiOperand is true, this materializes the address in the path that -// reaches a phi operand, not the phi block itself. +// reaches a phi operand, not the phi block itself. Do not map the storage onto +// the materialized address. // // If \p intoPhiOperand is false, then the materialized address is guaranteed to // domaninate the composing user. Map the user onto this address to avoid // rematerialization. -SILValue AddressMaterialization::materializeUseProjectionStorage( +// +// Note: This only materializes the address for the purpose of projecting an +// operand into the storage. It does not materialize the final address of +// storage after materializing the result. In particular, it materializes +// init_enum_data_addr, but not inject_enum_addr. +// +SILValue AddressMaterialization::recursivelyMaterializeStorage( ValueStorage &storage, bool intoPhiOperand = false) { // If this storage is already materialized, then simply return its // address. 
This not only avoids redundant projections, but is necessary for @@ -1331,7 +1338,7 @@ SILValue AddressMaterialization::materializeUseProjectionStorage( return recordAddress(useStorage.storage.storageAddress); } if (storage.isPhiProjection()) { - return recordAddress(materializeUseProjectionStorage( + return recordAddress(recursivelyMaterializeStorage( pass.valueStorageMap.getProjectedStorage(storage).storage, /*intoPhiOperand*/ true)); } @@ -1566,8 +1573,8 @@ void PhiRewriter::materializeOperand(PhiOperand phiOper) { auto &phiStorage = pass.valueStorageMap.getStorage(phiOper.getValue()); SILValue phiAddress = - addrMat.materializeUseProjectionStorage(phiStorage, - /*intoPhiOperand*/ true); + addrMat.recursivelyMaterializeStorage(phiStorage, + /*intoPhiOperand*/ true); if (!movePos.foundAntiDependenceCycle) { createPhiMove(builder, phiOperAddress, phiAddress); From 6f3a0c3df0543686b8bd6cf62aca94650f794607 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Wed, 9 Mar 2022 22:27:11 -0800 Subject: [PATCH 17/88] [SIL-opaque] rename initializeComposingUse --- lib/SILOptimizer/Mandatory/AddressLowering.cpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 99fc3f8bcef1f..f82a66fce22da 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1239,7 +1239,7 @@ class AddressMaterialization { return storage.storageAddress; } - void initializeOperand(Operand *operand); + void initializeComposingUse(Operand *operand); SILValue recursivelyMaterializeStorage(ValueStorage &storage, bool intoPhiOperand); @@ -1269,7 +1269,7 @@ class AddressMaterialization { /// /// If the operand projects into its use, then the memory was already /// initialized when visiting the use. 
-void AddressMaterialization::initializeOperand(Operand *operand) { +void AddressMaterialization::initializeComposingUse(Operand *operand) { SILValue def = operand->get(); if (def->getType().isAddressOnly(*pass.function)) { ValueStorage &storage = pass.valueStorageMap.getStorage(def); @@ -2903,7 +2903,7 @@ class DefRewriter : SILInstructionVisitor { void visitEnumInst(EnumInst *enumInst) { if (enumInst->hasOperand()) { // Handle operands here because loadable operands must also be copied. - addrMat.initializeOperand(&enumInst->getOperandRef()); + addrMat.initializeComposingUse(&enumInst->getOperandRef()); } SILValue enumAddr = addrMat.materializeAddress(enumInst); @@ -2916,7 +2916,7 @@ class DefRewriter : SILInstructionVisitor { InitExistentialValueInst *initExistentialValue) { // Initialize memory for the operand which may be opaque or loadable. - addrMat.initializeOperand(&initExistentialValue->getOperandRef()); + addrMat.initializeComposingUse(&initExistentialValue->getOperandRef()); } // Project an opaque value out of a box-type existential. @@ -2948,7 +2948,7 @@ class DefRewriter : SILInstructionVisitor { // For each element, initialize the operand's memory. Some struct elements // may be loadable types. for (Operand &operand : structInst->getAllOperands()) - addrMat.initializeOperand(&operand); + addrMat.initializeComposingUse(&operand); } // Define an opaque tuple. @@ -2956,7 +2956,7 @@ class DefRewriter : SILInstructionVisitor { // For each element, initialize the operand's memory. Some tuple elements // may be loadable types. 
for (Operand &operand : tupleInst->getAllOperands()) - addrMat.initializeOperand(&operand); + addrMat.initializeComposingUse(&operand); } }; } // end anonymous namespace From dbaada435c090fec25627c899f4440716c45dee0 Mon Sep 17 00:00:00 2001 From: David Smith Date: Fri, 18 Mar 2022 12:38:53 -0700 Subject: [PATCH 18/88] Stay in vectors longer before doing a horizontal sum --- stdlib/public/core/StringUTF16View.swift | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/stdlib/public/core/StringUTF16View.swift b/stdlib/public/core/StringUTF16View.swift index aac81751ab50f..326ee05464173 100644 --- a/stdlib/public/core/StringUTF16View.swift +++ b/stdlib/public/core/StringUTF16View.swift @@ -533,14 +533,18 @@ extension String.UTF16View { //Find the number of continuations (0b10xxxxxx) let sValue = Builtin.loadRaw(readPtr._rawValue) as S let continuations = S.zero.replacing(with: S.one, where: sValue .< -65 + 1) - let continuationCount = Int(continuations.wrappedSum()) //Find the number of 4 byte code points (0b11110xxx) let uValue = Builtin.loadRaw(readPtr._rawValue) as U - let fourBytes = U.zero.replacing(with: U.one, where: uValue .>= 0b11110000) - let fourByteCount = Int(fourBytes.wrappedSum()) - - utf16Count &+= (U.scalarCount - continuationCount) + fourByteCount + let fourBytes = S.zero.replacing( + with: S.one, + where: unsafeBitCast( + uValue .>= 0b11110000, + to: SIMDMask.self + ) + ) + + utf16Count &+= U.scalarCount + Int((fourBytes &- continuations).wrappedSum()) readPtr += MemoryLayout.stride } From 025079466a4746e4ca4237367d79a64b0370b4a6 Mon Sep 17 00:00:00 2001 From: Daniel Duan Date: Fri, 18 Mar 2022 15:13:31 -0700 Subject: [PATCH 19/88] [utils] Remove Python 2 The library `six` provides compatibility between Python 2, and 3. It's no longer necessary once we migrate of Python 2 completely. Also remove any custom logic for Python 2 (the ones referenced by a commentanyways). 
https://bugs.swift.org/browse/SR-16025 --- utils/GYBUnicodeDataUtils.py | 3 - utils/backtrace-check | 4 - utils/build-script | 10 +-- .../build_swift/argparse/actions.py | 8 +- .../build_swift/argparse/parser.py | 6 +- .../build_swift/build_swift/argparse/types.py | 4 +- utils/build_swift/build_swift/migration.py | 5 +- utils/build_swift/build_swift/presets.py | 13 +-- utils/build_swift/build_swift/shell.py | 84 ++++--------------- utils/build_swift/build_swift/versions.py | 8 +- .../build_swift/build_swift/wrappers/xcrun.py | 4 +- .../build_swift/argparse/test_actions.py | 14 ++-- .../build_swift/test_driver_arguments.py | 31 +++---- .../tests/build_swift/test_migration.py | 5 +- .../tests/build_swift/test_presets.py | 7 +- .../tests/build_swift/test_shell.py | 76 ++--------------- .../tests/build_swift/test_versions.py | 8 +- utils/build_swift/tests/utils.py | 7 +- utils/line-directive | 5 +- utils/round-trip-syntax-test | 11 +-- .../build_script_invocation.py | 6 +- .../swift_build_support/cmake.py | 6 +- .../tests/products/test_cmark.py | 7 +- .../tests/products/test_earlyswiftdriver.py | 7 +- .../tests/products/test_llvm.py | 7 +- .../tests/products/test_ninja.py | 7 +- .../tests/products/test_swift.py | 7 +- utils/swift_build_support/tests/test_shell.py | 7 +- 28 files changed, 78 insertions(+), 289 deletions(-) diff --git a/utils/GYBUnicodeDataUtils.py b/utils/GYBUnicodeDataUtils.py index 8651534dff8b2..3c541c4d5076c 100644 --- a/utils/GYBUnicodeDataUtils.py +++ b/utils/GYBUnicodeDataUtils.py @@ -355,9 +355,6 @@ def map_index(idx): else: return idx - # NOTE: Python 2's `map` function returns a list. Where Python 3's - # `map` function returns an iterator. To work around this the - # result of the `map` is explicitly converted to a `list`. 
return list(map(map_index, indexes)) # If self.bmp_data contains identical data blocks, keep the first one, diff --git a/utils/backtrace-check b/utils/backtrace-check index 53fd849665e26..a8058cff27b46 100755 --- a/utils/backtrace-check +++ b/utils/backtrace-check @@ -72,10 +72,6 @@ def main(): found_stack_trace_start = False found_stack_trace_entry = False for line in lines: - # In Python 2, string objects can contain Unicode characters. - if sys.version_info.major == 2: - line = line.decode('utf-8', 'replace') - line = line.rstrip('\n') # First see if we found the start of our stack trace start. If so, set diff --git a/utils/build-script b/utils/build-script index 4e97c6b266f48..345233348a7a6 100755 --- a/utils/build-script +++ b/utils/build-script @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # This source file is part of the Swift.org open source project # @@ -32,8 +32,6 @@ from build_swift.build_swift.constants import SWIFT_BUILD_ROOT from build_swift.build_swift.constants import SWIFT_REPO_NAME from build_swift.build_swift.constants import SWIFT_SOURCE_ROOT -import six - from swift_build_support.swift_build_support import build_script_invocation from swift_build_support.swift_build_support import shell from swift_build_support.swift_build_support import targets @@ -120,7 +118,7 @@ class JSONDumper(json.JSONEncoder): def default(self, o): if hasattr(o, '__dict__'): return vars(o) - return six.text_type(o) + return str(o) def print_xcodebuild_versions(file=sys.stdout): @@ -499,7 +497,7 @@ def main_preset(): try: preset_parser.read_files(args.preset_file_names) except presets.PresetError as e: - fatal_error(six.text_type(e)) + fatal_error(str(e)) if args.show_presets: for name in sorted(preset_parser.preset_names, @@ -520,7 +518,7 @@ def main_preset(): args.preset, vars=args.preset_substitutions) except presets.PresetError as e: - fatal_error(six.text_type(e)) + fatal_error(str(e)) preset_args = migration.migrate_swift_sdks(preset.args) diff 
--git a/utils/build_swift/build_swift/argparse/actions.py b/utils/build_swift/build_swift/argparse/actions.py index 58028eb5d9eff..a9f402cf63bcd 100644 --- a/utils/build_swift/build_swift/argparse/actions.py +++ b/utils/build_swift/build_swift/argparse/actions.py @@ -18,8 +18,6 @@ import argparse import copy -import six - from .types import BoolType, PathType @@ -81,7 +79,7 @@ def __init__(self, if dests == argparse.SUPPRESS: dests = [] metavar = metavar or '' - elif isinstance(dests, six.string_types): + elif isinstance(dests, (str,)): dests = [dests] metavar = metavar or dests[0].upper() @@ -138,7 +136,7 @@ def __init__(self, option_strings, join=None, **kwargs): **kwargs) def __call__(self, parser, namespace, values, option_string=None): - if isinstance(values, six.string_types): + if isinstance(values, (str,)): values = [values] for dest in self.dests: @@ -343,5 +341,5 @@ def __call__(self, parser, namespace, values, option_string=None): if self.message is not None: parser.error(self.message) - arg = option_string or six.text_type(values) + arg = option_string or str(values) parser.error('unsupported argument: {}'.format(arg)) diff --git a/utils/build_swift/build_swift/argparse/parser.py b/utils/build_swift/build_swift/argparse/parser.py index eb80a96744259..4957438d6ba0d 100644 --- a/utils/build_swift/build_swift/argparse/parser.py +++ b/utils/build_swift/build_swift/argparse/parser.py @@ -19,8 +19,6 @@ import argparse from contextlib import contextmanager -import six - from . 
import Namespace, SUPPRESS, actions from .actions import Action @@ -133,7 +131,7 @@ def thunk(**kwargs): *names, action=action, **kwargs) def add_positional(self, dests, action=None, **kwargs): - if isinstance(dests, six.string_types): + if isinstance(dests, (str,)): dests = [dests] if any(dest.startswith('-') for dest in dests): @@ -145,7 +143,7 @@ def add_positional(self, dests, action=None, **kwargs): return self._add_argument(dests, action, **kwargs) def add_option(self, option_strings, *actions, **kwargs): - if isinstance(option_strings, six.string_types): + if isinstance(option_strings, (str,)): option_strings = [option_strings] if not all(opt.startswith('-') for opt in option_strings): diff --git a/utils/build_swift/build_swift/argparse/types.py b/utils/build_swift/build_swift/argparse/types.py index 53c08a00062a1..fac0e599aef62 100644 --- a/utils/build_swift/build_swift/argparse/types.py +++ b/utils/build_swift/build_swift/argparse/types.py @@ -19,8 +19,6 @@ import re import shlex -import six - from . 
import ArgumentTypeError from ..versions import Version @@ -42,7 +40,7 @@ def _repr(cls, args): """ _args = [] - for key, value in six.iteritems(args): + for key, value in args.items(): _args.append('{}={}'.format(key, repr(value))) return '{}({})'.format(type(cls).__name__, ', '.join(_args)) diff --git a/utils/build_swift/build_swift/migration.py b/utils/build_swift/build_swift/migration.py index 60eb84e97ff45..a6fb00c6e4d40 100644 --- a/utils/build_swift/build_swift/migration.py +++ b/utils/build_swift/build_swift/migration.py @@ -17,9 +17,6 @@ import itertools import subprocess -import six -from six.moves import map - from swift_build_support.swift_build_support.targets import \ StdlibDeploymentTarget @@ -137,4 +134,4 @@ def check_impl_args(build_script_impl, args): _, err = pipe.communicate() if pipe.returncode != 0: - raise ValueError(six.text_type(err.splitlines()[0].decode())) + raise ValueError(str(err.splitlines()[0].decode())) diff --git a/utils/build_swift/build_swift/presets.py b/utils/build_swift/build_swift/presets.py index 58b556d00368d..06086f16babb1 100644 --- a/utils/build_swift/build_swift/presets.py +++ b/utils/build_swift/build_swift/presets.py @@ -14,13 +14,11 @@ from __future__ import absolute_import, unicode_literals +import configparser import functools import io from collections import OrderedDict, namedtuple -from six import StringIO -from six.moves import configparser - from . import class_utils @@ -313,14 +311,9 @@ def read_string(self, string): """Reads and parses a string containing preset definintions. 
""" - fp = StringIO(string) - - # ConfigParser changes drastically from Python 2 to 3 - if hasattr(self._parser, 'read_file'): - self._parser.read_file(fp) - else: - self._parser.readfp(fp) + fp = io.StringIO(string) + self._parser.read_file(fp) self._parse_raw_presets() # ------------------------------------------------------------------------- diff --git a/utils/build_swift/build_swift/shell.py b/utils/build_swift/build_swift/shell.py index edcc35d7938c6..28c25ee0b49f6 100644 --- a/utils/build_swift/build_swift/shell.py +++ b/utils/build_swift/build_swift/shell.py @@ -24,26 +24,11 @@ import subprocess import sys from copy import copy as _copy +from pathlib import Path +from pipes import quote as _quote from shlex import split from subprocess import CalledProcessError -import six -from six.moves import map - - -try: - # Python 2 - from pipes import quote as _quote -except ImportError: - from shutil import quote as _quote - - -try: - # Python 3.4 - from pathlib import Path -except ImportError: - Path = None - __all__ = [ 'CalledProcessError', @@ -111,7 +96,7 @@ def _convert_pathlib_path(path): return path if isinstance(path, Path): - return six.text_type(path) + return str(path) return path @@ -150,14 +135,14 @@ def _normalize_args(args): CommandWrapper instances into a one-dimensional list of strings. """ - if isinstance(args, six.string_types): + if isinstance(args, (str,)): return shlex.split(args) def normalize_arg(arg): arg = _convert_pathlib_path(arg) - if isinstance(arg, six.string_types): - return [six.text_type(arg)] + if isinstance(arg, (str,)): + return [str(arg)] if isinstance(arg, AbstractWrapper): return list(map(_convert_pathlib_path, arg.command)) @@ -173,34 +158,6 @@ def normalize_arg(arg): # ----------------------------------------------------------------------------- # Decorators -def _backport_devnull(func): - """Decorator used to backport the subprocess.DEVNULL functionality from - Python 3 to Python 2. 
- """ - - # DEVNULL was introduced in Python 3.3 - if _PY_VERSION >= (3, 3): - return func - - @functools.wraps(func) - def wrapper(command, **kwargs): - stdout = kwargs.get('stdout', sys.stdout) - stderr = kwargs.get('stderr', sys.stderr) - - if stdout != DEVNULL and stderr != DEVNULL: - return func(command, **kwargs) - - with open(os.devnull, 'w') as devnull: - if stdout == DEVNULL: - kwargs['stdout'] = devnull - if stderr == DEVNULL: - kwargs['stderr'] = devnull - - return func(command, **kwargs) - - return wrapper - - def _normalize_command(func): """Decorator used to uniformly normalize the input command of the subprocess wrappers. @@ -208,7 +165,7 @@ def _normalize_command(func): @functools.wraps(func) def wrapper(command, **kwargs): - if not isinstance(command, six.string_types): + if not isinstance(command, (str,)): command = _normalize_args(command) return func(command, **kwargs) @@ -237,10 +194,9 @@ def wrapper(command, **kwargs): # Public Functions def quote(command): - """Extension of the standard pipes.quote (Python 2) or shutil.quote - (Python 3) that handles both strings and lists of strings. This mirrors - how the subprocess package can handle commands as both a standalone string - or list of strings. + """Extension of the standard shutil.quote that handles both strings and + lists of strings. This mirrors how the subprocess package can handle + commands as both a standalone string or list of strings. >>> quote('/Applications/App Store.app') "'/Applications/App Store.app'" @@ -249,7 +205,7 @@ def quote(command): "rm -rf '~/Documents/My Homework'" """ - if isinstance(command, six.string_types): + if isinstance(command, (str,)): return _quote(command) if isinstance(command, collections.Iterable): @@ -288,7 +244,6 @@ def __init__(self, command, **kwargs): solution to this problem in the form of their `method_decorator`. 
""" - @_backport_devnull @_normalize_command @_add_echo_kwarg def closure(command, **kwargs): @@ -305,7 +260,6 @@ def __exit__(self, *exc): self.wait() -@_backport_devnull @_normalize_command @_add_echo_kwarg def call(command, **kwargs): @@ -316,7 +270,6 @@ def call(command, **kwargs): return subprocess.call(command, **kwargs) -@_backport_devnull @_normalize_command @_add_echo_kwarg def check_call(command, **kwargs): @@ -327,7 +280,6 @@ def check_call(command, **kwargs): return subprocess.check_call(command, **kwargs) -@_backport_devnull @_normalize_command @_add_echo_kwarg def check_output(command, **kwargs): @@ -337,16 +289,11 @@ def check_output(command, **kwargs): Output is returned as a unicode string. """ - if six.PY3: - kwargs['encoding'] = 'utf-8' + kwargs['encoding'] = 'utf-8' output = subprocess.check_output(command, **kwargs) - if six.PY3: - return output - - # Return unicode string rather than bytes in Python 2. - return six.text_type(output, errors='ignore') + return output # ----------------------------------------------------------------------------- @@ -484,8 +431,7 @@ def wraps(command): return CommandWrapper(command) -@six.add_metaclass(abc.ABCMeta) -class AbstractWrapper(object): +class AbstractWrapper(object, metaclass=abc.ABCMeta): """Abstract base class for implementing wrappers around command line utilities and executables. Subclasses must implement the `command` method which returns a command list suitable for use with executor instances. 
@@ -555,7 +501,7 @@ def __init__(self): self.EXECUTABLE = _convert_pathlib_path(self.EXECUTABLE) - if not isinstance(self.EXECUTABLE, six.string_types): + if not isinstance(self.EXECUTABLE, (str,)): raise AttributeError( '{}.EXECUTABLE must be an executable name or path'.format( type(self).__name__)) diff --git a/utils/build_swift/build_swift/versions.py b/utils/build_swift/build_swift/versions.py index 82562e95eb939..27285f840cad7 100644 --- a/utils/build_swift/build_swift/versions.py +++ b/utils/build_swift/build_swift/versions.py @@ -16,8 +16,6 @@ import functools -import six - __all__ = [ 'InvalidVersionError', @@ -175,7 +173,7 @@ class Version(object): __slots__ = ('components', '_str') def __init__(self, version): - version = six.text_type(version) + version = str(version) # Save the version string since it's impossible to reconstruct it from # just the parsed components @@ -190,10 +188,6 @@ def __eq__(self, other): return self.components == other.components - # NOTE: Python 2 compatibility. - def __ne__(self, other): - return not self == other - def __lt__(self, other): if not isinstance(other, Version): return NotImplemented diff --git a/utils/build_swift/build_swift/wrappers/xcrun.py b/utils/build_swift/build_swift/wrappers/xcrun.py index 7e29a6e73dbf4..d89e798857410 100644 --- a/utils/build_swift/build_swift/wrappers/xcrun.py +++ b/utils/build_swift/build_swift/wrappers/xcrun.py @@ -18,8 +18,6 @@ import re import shlex -import six - from .. 
import shell from ..versions import Version @@ -61,7 +59,7 @@ def _prepend_sdk_and_toolchain(func): @functools.wraps(func) def wrapper(self, args, sdk=None, toolchain=None, **kwargs): - if isinstance(args, six.string_types): + if isinstance(args, (str,)): args = shlex.split(args) if toolchain: args = ['--toolchain', toolchain] + args diff --git a/utils/build_swift/tests/build_swift/argparse/test_actions.py b/utils/build_swift/tests/build_swift/argparse/test_actions.py index 0bd4d981b19b7..bdb53de2728b8 100644 --- a/utils/build_swift/tests/build_swift/argparse/test_actions.py +++ b/utils/build_swift/tests/build_swift/argparse/test_actions.py @@ -14,8 +14,6 @@ from build_swift.argparse import ( ArgumentParser, BoolType, Nargs, PathType, SUPPRESS, actions) -import six - from ... import utils @@ -186,7 +184,7 @@ def test_valid_int(self): parser.add_argument('--foo', action=actions.StoreIntAction) for i in [0, 1, 42, -64]: - args = parser.parse_args(['--foo', six.text_type(i)]) + args = parser.parse_args(['--foo', str(i)]) self.assertEqual(args.foo, i) def test_invalid_int(self): @@ -195,7 +193,7 @@ def test_invalid_int(self): for i in [0.0, True, 'bar']: with utils.quiet_output(), self.assertRaises(SystemExit): - parser.parse_args(['--foo', six.text_type(i)]) + parser.parse_args(['--foo', str(i)]) class TestStoreTrueAction(unittest.TestCase): @@ -301,7 +299,7 @@ def test_with_optional_true_arg(self): parser.add_argument('--foo', action=actions.ToggleTrueAction) for value in BoolType.TRUE_VALUES: - args = parser.parse_args(['--foo', six.text_type(value)]) + args = parser.parse_args(['--foo', str(value)]) self.assertTrue(args.foo) args = parser.parse_args(['--foo={}'.format(value)]) @@ -312,7 +310,7 @@ def test_with_optional_false_arg(self): parser.add_argument('--foo', action=actions.ToggleTrueAction) for value in BoolType.FALSE_VALUES: - args = parser.parse_args(['--foo', six.text_type(value)]) + args = parser.parse_args(['--foo', str(value)]) 
self.assertFalse(args.foo) args = parser.parse_args(['--foo={}'.format(value)]) @@ -363,7 +361,7 @@ def test_with_optional_true_arg(self): parser.add_argument('--foo', action=actions.ToggleFalseAction) for value in BoolType.TRUE_VALUES: - args = parser.parse_args(['--foo', six.text_type(value)]) + args = parser.parse_args(['--foo', str(value)]) self.assertFalse(args.foo) args = parser.parse_args(['--foo={}'.format(value)]) @@ -374,7 +372,7 @@ def test_with_optional_false_arg(self): parser.add_argument('--foo', action=actions.ToggleFalseAction) for value in BoolType.FALSE_VALUES: - args = parser.parse_args(['--foo', six.text_type(value)]) + args = parser.parse_args(['--foo', str(value)]) self.assertTrue(args.foo) args = parser.parse_args(['--foo={}'.format(value)]) diff --git a/utils/build_swift/tests/build_swift/test_driver_arguments.py b/utils/build_swift/tests/build_swift/test_driver_arguments.py index 370e9f4538be4..85a6eb2a71c1c 100644 --- a/utils/build_swift/tests/build_swift/test_driver_arguments.py +++ b/utils/build_swift/tests/build_swift/test_driver_arguments.py @@ -20,8 +20,6 @@ from build_swift import migration from build_swift.presets import PresetParser -import six - from .test_presets import PRESET_DEFAULTS from .. import expected_options as eo from .. 
import utils @@ -80,9 +78,6 @@ def __new__(cls, name, bases, attrs): test_name = 'test_preset_{}'.format(name) attrs[test_name] = cls.generate_preset_test(name, args) - if six.PY2: - name = str(name) - return super(TestDriverArgumentParserMeta, cls).__new__( cls, name, bases, attrs) @@ -92,8 +87,8 @@ def test(self): parsed_values = self.parse_default_args([]) parsed_value = getattr(parsed_values, dest) - if default_value.__class__ in six.string_types: - parsed_value = six.text_type(parsed_value) + if default_value.__class__ in (str,): + parsed_value = str(parsed_value) self.assertEqual(default_value, parsed_value, 'Invalid default value for "{}": {} != {}' @@ -215,7 +210,7 @@ def _generate_choices_option_test(cls, option): def test(self): for choice in option.choices: namespace = self.parse_args( - [option.option_string, six.text_type(choice)]) + [option.option_string, str(choice)]) self.assertEqual(getattr(namespace, option.dest), choice) with self.assertRaises(ParserError): @@ -228,13 +223,13 @@ def _generate_int_option_test(cls, option): def test(self): for i in [0, 1, 42]: namespace = self.parse_args( - [option.option_string, six.text_type(i)]) + [option.option_string, str(i)]) self.assertEqual(int(getattr(namespace, option.dest)), i) # FIXME: int-type options should not accept non-int strings - # self.parse_args([option.option_string, six.text_type(0.0)]) - # self.parse_args([option.option_string, six.text_type(1.0)]) - # self.parse_args([option.option_string, six.text_type(3.14)]) + # self.parse_args([option.option_string, str(0.0)]) + # self.parse_args([option.option_string, str(1.0)]) + # self.parse_args([option.option_string, str(3.14)]) # self.parse_args([option.option_string, 'NaN']) return test @@ -335,15 +330,15 @@ def test(self): return test -@six.add_metaclass(TestDriverArgumentParserMeta) -class TestDriverArgumentParser(unittest.TestCase): +class TestDriverArgumentParser( + unittest.TestCase, metaclass=TestDriverArgumentParserMeta): def 
_parse_args(self, args): try: return migration.parse_args(self.parser, args) except (SystemExit, ValueError) as e: raise ParserError('failed to parse arguments: {} {}'.format( - six.text_type(args), e)) + str(args), e)) def _check_impl_args(self, namespace): assert hasattr(namespace, 'build_script_impl_args') @@ -354,7 +349,7 @@ def _check_impl_args(self, namespace): namespace.build_script_impl_args) except (SystemExit, ValueError) as e: raise ParserError('failed to parse impl arguments: {} {}'.format( - six.text_type(namespace.build_script_impl_args), e)) + str(namespace.build_script_impl_args), e)) def parse_args_and_unknown_args(self, args, namespace=None): if namespace is None: @@ -370,7 +365,7 @@ def parse_args_and_unknown_args(self, args, namespace=None): namespace, unknown_args)) except (SystemExit, argparse.ArgumentError) as e: raise ParserError('failed to parse arguments: {} {}'.format( - six.text_type(args), e)) + str(args), e)) return namespace, unknown_args @@ -380,7 +375,7 @@ def parse_args(self, args, namespace=None): if unknown_args: raise ParserError('unknown arguments: {}'.format( - six.text_type(unknown_args))) + str(unknown_args))) return namespace diff --git a/utils/build_swift/tests/build_swift/test_migration.py b/utils/build_swift/tests/build_swift/test_migration.py index 7caf09920a6d0..bca9b2acb177e 100644 --- a/utils/build_swift/tests/build_swift/test_migration.py +++ b/utils/build_swift/tests/build_swift/test_migration.py @@ -16,8 +16,6 @@ from build_swift import migration from build_swift.constants import BUILD_SCRIPT_IMPL_PATH -import six - from swift_build_support.swift_build_support.targets import StdlibDeploymentTarget @@ -66,8 +64,7 @@ def test(self): return test -@six.add_metaclass(TestMigrateSwiftSDKsMeta) -class TestMigrateSwiftSDKs(unittest.TestCase): +class TestMigrateSwiftSDKs(unittest.TestCase, metaclass=TestMigrateSwiftSDKsMeta): def test_empty_swift_sdks(self): args = migration.migrate_swift_sdks(['--swift-sdks=']) diff --git 
a/utils/build_swift/tests/build_swift/test_presets.py b/utils/build_swift/tests/build_swift/test_presets.py index aef033f28a4af..f142cbf35574d 100644 --- a/utils/build_swift/tests/build_swift/test_presets.py +++ b/utils/build_swift/tests/build_swift/test_presets.py @@ -9,6 +9,7 @@ from __future__ import unicode_literals +import configparser import os import unittest @@ -16,9 +17,6 @@ from build_swift import presets from build_swift.presets import Preset, PresetParser -import six -from six.moves import configparser - from .. import utils @@ -158,8 +156,7 @@ def test(self): return test -@six.add_metaclass(TestPresetParserMeta) -class TestPresetParser(unittest.TestCase): +class TestPresetParser(unittest.TestCase, metaclass=TestPresetParserMeta): def test_read_files(self): parser = PresetParser() diff --git a/utils/build_swift/tests/build_swift/test_shell.py b/utils/build_swift/tests/build_swift/test_shell.py index 625082f5b1106..25e5da2e877b4 100644 --- a/utils/build_swift/tests/build_swift/test_shell.py +++ b/utils/build_swift/tests/build_swift/test_shell.py @@ -9,15 +9,14 @@ from __future__ import absolute_import, unicode_literals +import builtins import collections import sys import unittest +from io import StringIO from build_swift import shell -import six -from six import StringIO - from .. 
import utils @@ -48,7 +47,7 @@ def patch(*args, **kwargs): # ----------------------------------------------------------------------------- # Constants -_OPEN_NAME = '{}.open'.format(six.moves.builtins.__name__) +_OPEN_NAME = '{}.open'.format(builtins.__name__) # ----------------------------------------------------------------------------- @@ -90,7 +89,7 @@ def test_convert_pathlib_path(self): self.assertEqual( shell._convert_pathlib_path(path), - six.text_type(path)) + str(path)) # ------------------------------------------------------------------------- # _get_stream_file @@ -192,59 +191,6 @@ class TestDecorators(unittest.TestCase): """Unit tests for the decorators defined in the build_swift.shell module used to backport or add functionality to the subprocess wrappers. """ - - # ------------------------------------------------------------------------- - # _backport_devnull - - @utils.requires_module('unittest.mock') - @patch(_OPEN_NAME, new_callable=mock_open) - @patch('build_swift.shell._PY_VERSION', (3, 2)) - def test_backport_devnull_stdout_kwarg(self, mock_open): - mock_file = MagicMock() - mock_open.return_value.__enter__.return_value = mock_file - - @shell._backport_devnull - def func(command, **kwargs): - self.assertEqual(kwargs['stdout'], mock_file) - - func('', stdout=shell.DEVNULL) - assert(mock_open.return_value.__enter__.called) - assert(mock_open.return_value.__exit__.called) - - @utils.requires_module('unittest.mock') - @patch(_OPEN_NAME, new_callable=mock_open) - @patch('build_swift.shell._PY_VERSION', (3, 2)) - def test_backport_devnull_stderr_kwarg(self, mock_open): - mock_file = MagicMock() - mock_open.return_value.__enter__.return_value = mock_file - - @shell._backport_devnull - def func(command, **kwargs): - self.assertEqual(kwargs['stderr'], mock_file) - - func('', stderr=shell.DEVNULL) - assert(mock_open.return_value.__enter__.called) - assert(mock_open.return_value.__exit__.called) - - @utils.requires_module('unittest.mock') - 
@patch(_OPEN_NAME, new_callable=mock_open) - def test_backport_devnull_does_not_open(self, mock_open): - @shell._backport_devnull - def func(command): - pass - - func('') - mock_open.return_value.__enter__.assert_not_called() - mock_open.return_value.__exit__.assert_not_called() - - @utils.requires_module('unittest.mock') - @patch('build_swift.shell._PY_VERSION', (3, 3)) - def test_backport_devnull_noop_starting_with_python_3_3(self): - def func(): - pass - - self.assertEqual(shell._backport_devnull(func), func) - # ------------------------------------------------------------------------- # _normalize_command @@ -368,20 +314,14 @@ def test_check_call(self, mock_check_call): @patch('subprocess.check_output') def test_check_output(self, mock_check_output): # Before Python 3 the subprocess.check_output function returned bytes. - if six.PY3: - mock_check_output.return_value = '' - else: - mock_check_output.return_value = b'' + mock_check_output.return_value = '' output = shell.check_output('ls') # We always expect str (utf-8) output - self.assertIsInstance(output, six.text_type) + self.assertIsInstance(output, str) - if six.PY3: - mock_check_output.assert_called_with('ls', encoding='utf-8') - else: - mock_check_output.assert_called_with('ls') + mock_check_output.assert_called_with('ls', encoding='utf-8') class TestShellUtilities(unittest.TestCase): @@ -468,7 +408,7 @@ def test_copy_echos_fake_cp_directory_command(self, mock_stdout): @patch('build_swift.shell._convert_pathlib_path') def test_pushd_converts_pathlib_path(self, mock_convert): path = Path('/other/path') - mock_convert.return_value = six.text_type(path) + mock_convert.return_value = str(path) shell.pushd(path) diff --git a/utils/build_swift/tests/build_swift/test_versions.py b/utils/build_swift/tests/build_swift/test_versions.py index 19447d63f679f..51e3c5076b53a 100644 --- a/utils/build_swift/tests/build_swift/test_versions.py +++ b/utils/build_swift/tests/build_swift/test_versions.py @@ -13,8 +13,6 @@ from 
build_swift.versions import Version -import six - class TestVersion(unittest.TestCase): """Unit tests for the Version class. @@ -59,7 +57,7 @@ def assertVersionLess(self, v1, v2): # ------------------------------------------------------------------------- def test_parse(self): - for string, components in six.iteritems(self.VERSION_COMPONENTS): + for string, components in self.VERSION_COMPONENTS.items(): # Version parses version = Version(string) @@ -90,7 +88,7 @@ def test_less_than(self): self.assertVersionLess('a0b', 'a1') def test_str(self): - for string in six.iterkeys(self.VERSION_COMPONENTS): + for string in self.VERSION_COMPONENTS.keys(): version = Version(string) - self.assertEqual(six.text_type(version), string) + self.assertEqual(str(version), string) diff --git a/utils/build_swift/tests/utils.py b/utils/build_swift/tests/utils.py index b077c9a255e37..b4001ad728662 100644 --- a/utils/build_swift/tests/utils.py +++ b/utils/build_swift/tests/utils.py @@ -14,14 +14,11 @@ import platform import sys import unittest +from io import StringIO from build_swift import cache_utils from build_swift.versions import Version -import six -from six import StringIO - - __all__ = [ 'quiet_output', 'redirect_stderr', @@ -161,7 +158,7 @@ def requires_python(version): greater or equal to the required version. """ - if isinstance(version, six.string_types): + if isinstance(version, str): version = Version(version) if _PYTHON_VERSION >= version: diff --git a/utils/line-directive b/utils/line-directive index 8678f353eae49..13e8988969e54 100755 --- a/utils/line-directive +++ b/utils/line-directive @@ -62,10 +62,7 @@ line_pattern = re.compile( def _make_line_map(target_filename, stream=None): """ - >>> try: - ... from StringIO import StringIO # py2 - ... except ImportError: - ... from io import StringIO # py3 + >>> from io import StringIO >>> _make_line_map('box', ... StringIO('''// ###sourceLocation(file: "foo.bar", line: 3) ... 
line 2 diff --git a/utils/round-trip-syntax-test b/utils/round-trip-syntax-test index ddcccd389dc3d..808cceab22df9 100755 --- a/utils/round-trip-syntax-test +++ b/utils/round-trip-syntax-test @@ -14,21 +14,12 @@ from functools import reduce logging.basicConfig(format='%(message)s', level=logging.INFO) -# Python 2 `unicode` was renamed `str` in Python 3. To consistently support -# both, define `unicode` to be `str` when using Python 3. Once we can drop -# Python 2 support, delete this and change all uses of `unicode` to `str`. -# (Currently, this is only here to avoid a python_lint failure from unicode -# references below.) -if sys.version_info[0] >= 3: - unicode = str - - class RoundTripTask(object): def __init__(self, input_filename, action, swift_syntax_test, skip_bad_syntax): assert action == '-round-trip-parse' or action == '-round-trip-lex' if sys.version_info[0] < 3: - assert type(input_filename) == unicode + assert type(input_filename) == str assert type(swift_syntax_test) == str assert os.path.isfile(input_filename), \ diff --git a/utils/swift_build_support/swift_build_support/build_script_invocation.py b/utils/swift_build_support/swift_build_support/build_script_invocation.py index 3ae3002f5d906..5343318ad80fa 100644 --- a/utils/swift_build_support/swift_build_support/build_script_invocation.py +++ b/utils/swift_build_support/swift_build_support/build_script_invocation.py @@ -20,8 +20,6 @@ from build_swift.build_swift.constants import SWIFT_REPO_NAME from build_swift.build_swift.constants import SWIFT_SOURCE_ROOT -import six - from swift_build_support.swift_build_support import products from swift_build_support.swift_build_support import shell from swift_build_support.swift_build_support import targets @@ -495,7 +493,7 @@ def compute_host_specific_variables(self): try: config = HostSpecificConfiguration(host_target, args) except argparse.ArgumentError as e: - exit_rejecting_arguments(six.text_type(e)) + exit_rejecting_arguments(str(e)) # Convert into 
`build-script-impl` style variables. options[host_target] = { @@ -696,7 +694,7 @@ def _execute_impl(self, pipeline, all_hosts, should_run_epilogue_operations): try: config = HostSpecificConfiguration(host_target.name, self.args) except argparse.ArgumentError as e: - exit_rejecting_arguments(six.text_type(e)) + exit_rejecting_arguments(str(e)) print("Building the standard library for: {}".format( " ".join(config.swift_stdlib_build_targets))) if config.swift_test_run_targets and ( diff --git a/utils/swift_build_support/swift_build_support/cmake.py b/utils/swift_build_support/swift_build_support/cmake.py index 8e2aeaba64834..5da052e40764c 100644 --- a/utils/swift_build_support/swift_build_support/cmake.py +++ b/utils/swift_build_support/swift_build_support/cmake.py @@ -22,8 +22,6 @@ import re from numbers import Number -import six - from . import shell @@ -46,7 +44,7 @@ def define(self, var, value): value = self.true_false(value) if value is None: value = "" - elif not isinstance(value, six.string_types + (Number,)): + elif not isinstance(value, (str, Number)): raise ValueError('define: invalid value for key %s: %s (%s)' % (var, value, type(value))) self._options.append('-D%s=%s' % (var, value)) @@ -202,7 +200,7 @@ def build_args(self): elif args.cmake_generator == 'Xcode': build_args += ['-parallelizeTargets', - '-jobs', six.text_type(jobs)] + '-jobs', str(jobs)] return build_args diff --git a/utils/swift_build_support/tests/products/test_cmark.py b/utils/swift_build_support/tests/products/test_cmark.py index 77b25bfa56158..5c1e83c6d1975 100644 --- a/utils/swift_build_support/tests/products/test_cmark.py +++ b/utils/swift_build_support/tests/products/test_cmark.py @@ -15,12 +15,7 @@ import sys import tempfile import unittest -try: - # py2 - from StringIO import StringIO -except ImportError: - # py3 - from io import StringIO +from io import StringIO # from swift_build_support import cmake from swift_build_support import shell diff --git 
a/utils/swift_build_support/tests/products/test_earlyswiftdriver.py b/utils/swift_build_support/tests/products/test_earlyswiftdriver.py index d572b25b59066..c6fc4e1a0f6bd 100644 --- a/utils/swift_build_support/tests/products/test_earlyswiftdriver.py +++ b/utils/swift_build_support/tests/products/test_earlyswiftdriver.py @@ -15,12 +15,7 @@ import sys import tempfile import unittest -try: - # py2 - from StringIO import StringIO -except ImportError: - # py3 - from io import StringIO +from io import StringIO from swift_build_support import shell from swift_build_support.products import EarlySwiftDriver diff --git a/utils/swift_build_support/tests/products/test_llvm.py b/utils/swift_build_support/tests/products/test_llvm.py index 058d35eb70e53..edcf8fad59120 100644 --- a/utils/swift_build_support/tests/products/test_llvm.py +++ b/utils/swift_build_support/tests/products/test_llvm.py @@ -15,12 +15,7 @@ import sys import tempfile import unittest -try: - # py2 - from StringIO import StringIO -except ImportError: - # py3 - from io import StringIO +from io import StringIO from swift_build_support import shell from swift_build_support.products import LLVM diff --git a/utils/swift_build_support/tests/products/test_ninja.py b/utils/swift_build_support/tests/products/test_ninja.py index 8938ade1d16c9..171f18822baac 100644 --- a/utils/swift_build_support/tests/products/test_ninja.py +++ b/utils/swift_build_support/tests/products/test_ninja.py @@ -16,12 +16,7 @@ import sys import tempfile import unittest -try: - # py2 - from StringIO import StringIO -except ImportError: - # py3 - from io import StringIO +from io import StringIO from build_swift.build_swift.wrappers import xcrun diff --git a/utils/swift_build_support/tests/products/test_swift.py b/utils/swift_build_support/tests/products/test_swift.py index f1d9e68739883..bcd413641dbc0 100644 --- a/utils/swift_build_support/tests/products/test_swift.py +++ b/utils/swift_build_support/tests/products/test_swift.py @@ -15,12 +15,7 @@ 
import sys import tempfile import unittest -try: - # py2 - from StringIO import StringIO -except ImportError: - # py3 - from io import StringIO +from io import StringIO from swift_build_support import shell from swift_build_support.products import Swift diff --git a/utils/swift_build_support/tests/test_shell.py b/utils/swift_build_support/tests/test_shell.py index 431affba2ec81..f6afd72b3a7c8 100644 --- a/utils/swift_build_support/tests/test_shell.py +++ b/utils/swift_build_support/tests/test_shell.py @@ -16,12 +16,7 @@ import sys import tempfile import unittest -try: - # py2 - from StringIO import StringIO -except ImportError: - # py3 - from io import StringIO +from io import StringIO from swift_build_support import shell From 51777d5ff773e442100f91fc78c5510ac80cc09d Mon Sep 17 00:00:00 2001 From: Alex Hoppen Date: Fri, 18 Mar 2022 10:34:32 +0100 Subject: [PATCH 20/88] [CodeCompletion] Move TypeCheckCompletionCallbacks to IDE --- include/swift/AST/ASTContext.h | 7 +- include/swift/IDE/ArgumentCompletion.h | 2 +- include/swift/IDE/DotExprCompletion.h | 2 +- include/swift/IDE/ExprCompletion.h | 2 +- include/swift/IDE/KeyPathCompletion.h | 2 +- .../swift/IDE/TypeCheckCompletionCallback.h | 81 +++++++++++++ .../swift/IDE/UnresolvedMemberCompletion.h | 2 +- .../swift/Sema/CodeCompletionTypeChecking.h | 60 ---------- include/swift/Sema/CompletionContextFinder.h | 2 +- include/swift/Sema/IDETypeChecking.h | 14 --- lib/IDE/CMakeLists.txt | 1 + lib/IDE/CodeCompletion.cpp | 2 +- lib/IDE/TypeCheckCompletionCallback.cpp | 110 ++++++++++++++++++ lib/Sema/TypeCheckCodeCompletion.cpp | 89 -------------- lib/Sema/TypeCheckConstraints.cpp | 4 +- 15 files changed, 206 insertions(+), 174 deletions(-) create mode 100644 include/swift/IDE/TypeCheckCompletionCallback.h delete mode 100644 include/swift/Sema/CodeCompletionTypeChecking.h create mode 100644 lib/IDE/TypeCheckCompletionCallback.cpp diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h index 
b97cf8e2fd9d1..c7aa5d317400f 100644 --- a/include/swift/AST/ASTContext.h +++ b/include/swift/AST/ASTContext.h @@ -130,7 +130,6 @@ namespace swift { class IndexSubset; struct SILAutoDiffDerivativeFunctionKey; struct InterfaceSubContextDelegate; - class TypeCheckCompletionCallback; enum class KnownProtocolKind : uint8_t; @@ -146,6 +145,10 @@ namespace syntax { class SyntaxArena; } +namespace ide { + class TypeCheckCompletionCallback; +} + /// Lists the set of "known" Foundation entities that are used in the /// compiler. /// @@ -281,7 +284,7 @@ class ASTContext final { /// the cancellation might return with any result. std::shared_ptr> CancellationFlag = nullptr; - TypeCheckCompletionCallback *CompletionCallback = nullptr; + ide::TypeCheckCompletionCallback *CompletionCallback = nullptr; /// The request-evaluator that is used to process various requests. Evaluator evaluator; diff --git a/include/swift/IDE/ArgumentCompletion.h b/include/swift/IDE/ArgumentCompletion.h index c6b9d404c8b8e..6527f9b4f5b8d 100644 --- a/include/swift/IDE/ArgumentCompletion.h +++ b/include/swift/IDE/ArgumentCompletion.h @@ -16,7 +16,7 @@ #include "swift/IDE/CodeCompletionConsumer.h" #include "swift/IDE/CodeCompletionContext.h" #include "swift/IDE/PossibleParamInfo.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" namespace swift { namespace ide { diff --git a/include/swift/IDE/DotExprCompletion.h b/include/swift/IDE/DotExprCompletion.h index 597ff1f4acdb4..2bf1375fb866a 100644 --- a/include/swift/IDE/DotExprCompletion.h +++ b/include/swift/IDE/DotExprCompletion.h @@ -15,7 +15,7 @@ #include "swift/IDE/CodeCompletionConsumer.h" #include "swift/IDE/CodeCompletionContext.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" namespace swift { namespace ide { diff --git a/include/swift/IDE/ExprCompletion.h b/include/swift/IDE/ExprCompletion.h index 4b60b8d8e6e4b..f8475b2a37a90 100644 --- 
a/include/swift/IDE/ExprCompletion.h +++ b/include/swift/IDE/ExprCompletion.h @@ -15,7 +15,7 @@ #include "swift/IDE/CodeCompletionConsumer.h" #include "swift/IDE/CodeCompletionContext.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" namespace swift { namespace ide { diff --git a/include/swift/IDE/KeyPathCompletion.h b/include/swift/IDE/KeyPathCompletion.h index 972f39c6a669f..cdd70e0f15502 100644 --- a/include/swift/IDE/KeyPathCompletion.h +++ b/include/swift/IDE/KeyPathCompletion.h @@ -15,7 +15,7 @@ #include "swift/IDE/CodeCompletionConsumer.h" #include "swift/IDE/CodeCompletionContext.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" namespace swift { namespace ide { diff --git a/include/swift/IDE/TypeCheckCompletionCallback.h b/include/swift/IDE/TypeCheckCompletionCallback.h new file mode 100644 index 0000000000000..02455c1c0b941 --- /dev/null +++ b/include/swift/IDE/TypeCheckCompletionCallback.h @@ -0,0 +1,81 @@ +//===--- TypeCheckCompletionCallback.h -----------------------------------===// +// +// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +/// \file +/// Provides TypeCheckCompletionCallback implementations for the various kinds +/// of code completion. These extract and persist information needed to compute +/// completion results from the solutions formed during expression typechecking. 
+// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_IDE_TYPECHECKCOMPLETIONCALLBACK_H +#define SWIFT_IDE_TYPECHECKCOMPLETIONCALLBACK_H + +#include "swift/AST/Expr.h" +#include "swift/AST/Type.h" +#include "swift/Basic/LLVM.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" + +namespace swift { +class Decl; +class DeclContext; +class Type; +class ValueDecl; +class CodeCompletionExpr; + +namespace constraints { +class ConstraintSystem; +class Solution; +} // namespace constraints + +namespace ide { + +class TypeCheckCompletionCallback { + bool GotCallback = false; + +public: + virtual ~TypeCheckCompletionCallback() {} + + /// Called for each solution produced while type-checking an expression + /// that the code completion expression participates in. + virtual void sawSolution(const constraints::Solution &solution) { + GotCallback = true; + }; + + /// True if at least one solution was passed via the \c sawSolution + /// callback. + bool gotCallback() const { return GotCallback; } + + /// Typecheck the code completion expression in its outermost expression + /// context, calling \c sawSolution for each solution formed. + virtual void fallbackTypeCheck(DeclContext *DC); +}; + +// MARK: - Utility functions for subclasses of TypeCheckCompletionCallback + +Type getTypeForCompletion(const constraints::Solution &S, Expr *E); + +/// Whether the given completion expression is the only expression in its +/// containing closure or function body and its value is implicitly returned. +/// +/// If these conditions are met, code completion needs to avoid penalizing +/// completion results that don't match the expected return type when +/// computing type relations, as since no return statement was explicitly +/// written by the user, it's possible they intend the single expression not +/// as the return value but merely the first entry in a multi-statement body +/// they just haven't finished writing yet. 
+bool isImplicitSingleExpressionReturn(constraints::ConstraintSystem &CS, + Expr *CompletionExpr); + +} // namespace ide +} // namespace swift + +#endif // SWIFT_IDE_TYPECHECKCOMPLETIONCALLBACK_H diff --git a/include/swift/IDE/UnresolvedMemberCompletion.h b/include/swift/IDE/UnresolvedMemberCompletion.h index 3e303fb65c42d..b6d3ea007e7f8 100644 --- a/include/swift/IDE/UnresolvedMemberCompletion.h +++ b/include/swift/IDE/UnresolvedMemberCompletion.h @@ -15,7 +15,7 @@ #include "swift/IDE/CodeCompletionConsumer.h" #include "swift/IDE/CodeCompletionContext.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" namespace swift { namespace ide { diff --git a/include/swift/Sema/CodeCompletionTypeChecking.h b/include/swift/Sema/CodeCompletionTypeChecking.h deleted file mode 100644 index 57b9fb76bce2e..0000000000000 --- a/include/swift/Sema/CodeCompletionTypeChecking.h +++ /dev/null @@ -1,60 +0,0 @@ -//===--- CodeCompletionTypeChecking.h --------------------------*- C++ -*-===// -// -// Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors -// Licensed under Apache License v2.0 with Runtime Library Exception -// -// See https://swift.org/LICENSE.txt for license information -// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors -// -//===----------------------------------------------------------------------===// -// -/// \file -/// Provides TypeCheckCompletionCallback implementations for the various kinds -/// of code completion. These extract and persist information needed to compute -/// completion results from the solutions formed during expression typechecking. 
-// -//===----------------------------------------------------------------------===// - -#ifndef SWIFT_SEMA_CODECOMPLETIONTYPECHECKING_H -#define SWIFT_SEMA_CODECOMPLETIONTYPECHECKING_H - -#include "swift/AST/Expr.h" -#include "swift/AST/Type.h" -#include "swift/Basic/LLVM.h" -#include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/SmallVector.h" - -namespace swift { - class Decl; - class DeclContext; - class Type; - class ValueDecl; - class CodeCompletionExpr; - - namespace constraints { - class Solution; - } - - class TypeCheckCompletionCallback { - bool GotCallback = false; - - public: - virtual ~TypeCheckCompletionCallback() {} - - /// Called for each solution produced while type-checking an expression - /// that the code completion expression participates in. - virtual void sawSolution(const constraints::Solution &solution) { - GotCallback = true; - }; - - /// True if at least one solution was passed via the \c sawSolution - /// callback. - bool gotCallback() const { return GotCallback; } - - /// Typecheck the code completion expression in its outermost expression - /// context, calling \c sawSolution for each solution formed. 
- virtual void fallbackTypeCheck(DeclContext *DC); - }; -} - -#endif diff --git a/include/swift/Sema/CompletionContextFinder.h b/include/swift/Sema/CompletionContextFinder.h index 803f14b37b185..de362bf214630 100644 --- a/include/swift/Sema/CompletionContextFinder.h +++ b/include/swift/Sema/CompletionContextFinder.h @@ -16,7 +16,7 @@ #include "swift/AST/ASTNode.h" #include "swift/AST/ASTWalker.h" #include "swift/AST/Expr.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" namespace swift { diff --git a/include/swift/Sema/IDETypeChecking.h b/include/swift/Sema/IDETypeChecking.h index 3e6bff07179c8..b9c3c44cd97d6 100644 --- a/include/swift/Sema/IDETypeChecking.h +++ b/include/swift/Sema/IDETypeChecking.h @@ -159,20 +159,6 @@ namespace swift { constraints::SolutionApplicationTarget &target, bool needsPrecheck, llvm::function_ref callback); - Type getTypeForCompletion(const constraints::Solution &S, Expr *E); - - /// Whether the given completion expression is the only expression in its - /// containing closure or function body and its value is implicitly returned. - /// - /// If these conditions are met, code completion needs to avoid penalizing - /// completion results that don't match the expected return type when - /// computing type relations, as since no return statement was explicitly - /// written by the user, it's possible they intend the single expression not - /// as the return value but merely the first entry in a multi-statement body - /// they just haven't finished writing yet. 
- bool isImplicitSingleExpressionReturn(constraints::ConstraintSystem &CS, - Expr *CompletionExpr); - LookupResult lookupSemanticMember(DeclContext *DC, Type ty, DeclName name); diff --git a/lib/IDE/CMakeLists.txt b/lib/IDE/CMakeLists.txt index 02301eca46810..dd6f43f1d9583 100644 --- a/lib/IDE/CMakeLists.txt +++ b/lib/IDE/CMakeLists.txt @@ -30,6 +30,7 @@ add_swift_host_library(swiftIDE STATIC REPLCodeCompletion.cpp SwiftSourceDocInfo.cpp SyntaxModel.cpp + TypeCheckCompletionCallback.cpp UnresolvedMemberCompletion.cpp Utils.cpp IDETypeChecking.cpp diff --git a/lib/IDE/CodeCompletion.cpp b/lib/IDE/CodeCompletion.cpp index 98afd4be11744..5505ee3208a8e 100644 --- a/lib/IDE/CodeCompletion.cpp +++ b/lib/IDE/CodeCompletion.cpp @@ -42,10 +42,10 @@ #include "swift/IDE/DotExprCompletion.h" #include "swift/IDE/ExprCompletion.h" #include "swift/IDE/KeyPathCompletion.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" #include "swift/IDE/UnresolvedMemberCompletion.h" #include "swift/IDE/Utils.h" #include "swift/Parse/CodeCompletionCallbacks.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" #include "swift/Sema/IDETypeChecking.h" #include "swift/Strings.h" #include "swift/Subsystems.h" diff --git a/lib/IDE/TypeCheckCompletionCallback.cpp b/lib/IDE/TypeCheckCompletionCallback.cpp new file mode 100644 index 0000000000000..2460d60c71980 --- /dev/null +++ b/lib/IDE/TypeCheckCompletionCallback.cpp @@ -0,0 +1,110 @@ +//===--- TypeCheckCompletionCallback.cpp ----------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2020 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "swift/IDE/TypeCheckCompletionCallback.h" +#include "swift/Sema/CompletionContextFinder.h" +#include "swift/Sema/ConstraintSystem.h" +#include "swift/Sema/IDETypeChecking.h" + +using namespace swift; +using namespace swift::ide; +using namespace swift::constraints; + +void TypeCheckCompletionCallback::fallbackTypeCheck(DeclContext *DC) { + assert(!GotCallback); + + CompletionContextFinder finder(DC); + if (!finder.hasCompletionExpr()) + return; + + auto fallback = finder.getFallbackCompletionExpr(); + if (!fallback) + return; + + SolutionApplicationTarget completionTarget(fallback->E, fallback->DC, + CTP_Unused, Type(), + /*isDiscared=*/true); + typeCheckForCodeCompletion(completionTarget, /*needsPrecheck=*/true, + [&](const Solution &S) { sawSolution(S); }); +} + +// MARK: - Utility functions for subclasses of TypeCheckCompletionCallback + +Type swift::ide::getTypeForCompletion(const constraints::Solution &S, Expr *E) { + if (!S.hasType(E)) { + assert(false && "Expression wasn't type checked?"); + return nullptr; + } + + auto &CS = S.getConstraintSystem(); + + // To aid code completion, we need to attempt to convert type placeholders + // back into underlying generic parameters if possible, since type + // of the code completion expression is used as "expected" (or contextual) + // type so it's helpful to know what requirements it has to filter + // the list of possible member candidates e.g. 
+ // + // \code + // func test(_: [T]) {} + // + // test(42.#^MEMBERS^#) + // \code + // + // It's impossible to resolve `T` in this case but code completion + // expression should still have a type of `[T]` instead of `[<>]` + // because it helps to produce correct contextual member list based on + // a conformance requirement associated with generic parameter `T`. + if (isa(E)) { + auto completionTy = S.getType(E).transform([&](Type type) -> Type { + if (auto *typeVar = type->getAs()) + return S.getFixedType(typeVar); + return type; + }); + + return S.simplifyType(completionTy.transform([&](Type type) { + if (auto *placeholder = type->getAs()) { + if (auto *typeVar = + placeholder->getOriginator().dyn_cast()) { + if (auto *GP = typeVar->getImpl().getGenericParameter()) { + // Code completion depends on generic parameter type being + // represented in terms of `ArchetypeType` since it's easy + // to extract protocol requirements from it. + if (auto *GPD = GP->getDecl()) + return GPD->getInnermostDeclContext()->mapTypeIntoContext(GP); + } + } + + return Type(CS.getASTContext().TheUnresolvedType); + } + + return type; + })); + } + + return S.getResolvedType(E); +} + +bool swift::ide::isImplicitSingleExpressionReturn(ConstraintSystem &CS, + Expr *CompletionExpr) { + Expr *ParentExpr = CS.getParentExpr(CompletionExpr); + if (!ParentExpr) + return CS.getContextualTypePurpose(CompletionExpr) == CTP_ReturnSingleExpr; + + if (auto *ParentCE = dyn_cast(ParentExpr)) { + if (ParentCE->hasSingleExpressionBody() && + ParentCE->getSingleExpressionBody() == CompletionExpr) { + ASTNode Last = ParentCE->getBody()->getLastElement(); + return !Last.isStmt(StmtKind::Return) || Last.isImplicit(); + } + } + return false; +} diff --git a/lib/Sema/TypeCheckCodeCompletion.cpp b/lib/Sema/TypeCheckCodeCompletion.cpp index 3b11b12885968..244e8167d1a62 100644 --- a/lib/Sema/TypeCheckCodeCompletion.cpp +++ b/lib/Sema/TypeCheckCodeCompletion.cpp @@ -41,7 +41,6 @@ #include 
"swift/Basic/STLExtras.h" #include "swift/Parse/Lexer.h" #include "swift/Sema/IDETypeChecking.h" -#include "swift/Sema/CodeCompletionTypeChecking.h" #include "swift/Sema/ConstraintSystem.h" #include "swift/Sema/CompletionContextFinder.h" #include "swift/Strings.h" @@ -789,91 +788,3 @@ swift::lookupSemanticMember(DeclContext *DC, Type ty, DeclName name) { return TypeChecker::lookupMember(DC, ty, DeclNameRef(name), None); } -Type swift::getTypeForCompletion(const constraints::Solution &S, Expr *E) { - if (!S.hasType(E)) { - assert(false && "Expression wasn't type checked?"); - return nullptr; - } - - auto &CS = S.getConstraintSystem(); - - // To aid code completion, we need to attempt to convert type placeholders - // back into underlying generic parameters if possible, since type - // of the code completion expression is used as "expected" (or contextual) - // type so it's helpful to know what requirements it has to filter - // the list of possible member candidates e.g. - // - // \code - // func test(_: [T]) {} - // - // test(42.#^MEMBERS^#) - // \code - // - // It's impossible to resolve `T` in this case but code completion - // expression should still have a type of `[T]` instead of `[<>]` - // because it helps to produce correct contextual member list based on - // a conformance requirement associated with generic parameter `T`. - if (isa(E)) { - auto completionTy = S.getType(E).transform([&](Type type) -> Type { - if (auto *typeVar = type->getAs()) - return S.getFixedType(typeVar); - return type; - }); - - return S.simplifyType(completionTy.transform([&](Type type) { - if (auto *placeholder = type->getAs()) { - if (auto *typeVar = - placeholder->getOriginator().dyn_cast()) { - if (auto *GP = typeVar->getImpl().getGenericParameter()) { - // Code completion depends on generic parameter type being - // represented in terms of `ArchetypeType` since it's easy - // to extract protocol requirements from it. 
- if (auto *GPD = GP->getDecl()) - return GPD->getInnermostDeclContext()->mapTypeIntoContext(GP); - } - } - - return Type(CS.getASTContext().TheUnresolvedType); - } - - return type; - })); - } - - return S.getResolvedType(E); -} - -bool swift::isImplicitSingleExpressionReturn(ConstraintSystem &CS, - Expr *CompletionExpr) { - Expr *ParentExpr = CS.getParentExpr(CompletionExpr); - if (!ParentExpr) - return CS.getContextualTypePurpose(CompletionExpr) == CTP_ReturnSingleExpr; - - if (auto *ParentCE = dyn_cast(ParentExpr)) { - if (ParentCE->hasSingleExpressionBody() && - ParentCE->getSingleExpressionBody() == CompletionExpr) { - ASTNode Last = ParentCE->getBody()->getLastElement(); - return !Last.isStmt(StmtKind::Return) || Last.isImplicit(); - } - } - return false; -} - -void TypeCheckCompletionCallback::fallbackTypeCheck(DeclContext *DC) { - assert(!GotCallback); - - CompletionContextFinder finder(DC); - if (!finder.hasCompletionExpr()) - return; - - auto fallback = finder.getFallbackCompletionExpr(); - if (!fallback) - return; - - SolutionApplicationTarget completionTarget(fallback->E, fallback->DC, - CTP_Unused, Type(), - /*isDiscared=*/true); - TypeChecker::typeCheckForCodeCompletion( - completionTarget, /*needsPrecheck*/ true, - [&](const Solution &S) { sawSolution(S); }); -} diff --git a/lib/Sema/TypeCheckConstraints.cpp b/lib/Sema/TypeCheckConstraints.cpp index ce3a407fb5d1e..ab47a810f5dc1 100644 --- a/lib/Sema/TypeCheckConstraints.cpp +++ b/lib/Sema/TypeCheckConstraints.cpp @@ -17,8 +17,8 @@ //===----------------------------------------------------------------------===// #include "MiscDiagnostics.h" -#include "TypeChecker.h" #include "TypeCheckAvailability.h" +#include "TypeChecker.h" #include "swift/AST/ASTVisitor.h" #include "swift/AST/ASTWalker.h" #include "swift/AST/DiagnosticSuppression.h" @@ -28,7 +28,7 @@ #include "swift/AST/SubstitutionMap.h" #include "swift/AST/TypeCheckRequests.h" #include "swift/Basic/Statistic.h" -#include 
"swift/Sema/CodeCompletionTypeChecking.h" +#include "swift/IDE/TypeCheckCompletionCallback.h" #include "swift/Sema/ConstraintSystem.h" #include "swift/Sema/SolutionResult.h" #include "llvm/ADT/DenseMap.h" From 17eb6ea1a296be54a28e78057fa3481c9483dd12 Mon Sep 17 00:00:00 2001 From: Alex Hoppen Date: Fri, 18 Mar 2022 10:40:02 +0100 Subject: [PATCH 21/88] [CodeCompletion] Unify logic to retrieve completion expr type for all completion callbacks --- lib/IDE/ArgumentCompletion.cpp | 3 --- lib/IDE/DotExprCompletion.cpp | 2 +- lib/IDE/ExprCompletion.cpp | 13 +------------ lib/IDE/TypeCheckCompletionCallback.cpp | 14 ++++++++++++-- lib/IDE/UnresolvedMemberCompletion.cpp | 2 +- test/IDE/complete_assignment.swift | 2 +- 6 files changed, 16 insertions(+), 20 deletions(-) diff --git a/lib/IDE/ArgumentCompletion.cpp b/lib/IDE/ArgumentCompletion.cpp index acc2cfdb3d6da..9887e8e05453b 100644 --- a/lib/IDE/ArgumentCompletion.cpp +++ b/lib/IDE/ArgumentCompletion.cpp @@ -94,9 +94,6 @@ void ArgumentTypeCheckCompletionCallback::sawSolution(const Solution &S) { TypeCheckCompletionCallback::sawSolution(S); Type ExpectedTy = getTypeForCompletion(S, CompletionExpr); - if (!ExpectedTy) { - return; - } auto &CS = S.getConstraintSystem(); diff --git a/lib/IDE/DotExprCompletion.cpp b/lib/IDE/DotExprCompletion.cpp index f64f00edcc7d0..7d27071b82121 100644 --- a/lib/IDE/DotExprCompletion.cpp +++ b/lib/IDE/DotExprCompletion.cpp @@ -55,7 +55,7 @@ void DotExprTypeCheckCompletionCallback::sawSolution( // If base type couldn't be determined (e.g. because base expression // is an invalid reference), let's not attempt to do a lookup since // it wouldn't produce any useful results anyway. 
- if (!BaseTy || BaseTy->getRValueType()->is()) + if (!BaseTy) return; auto *Locator = CS.getConstraintLocator(SemanticExpr); diff --git a/lib/IDE/ExprCompletion.cpp b/lib/IDE/ExprCompletion.cpp index c97918a6fbe82..154f6268970fe 100644 --- a/lib/IDE/ExprCompletion.cpp +++ b/lib/IDE/ExprCompletion.cpp @@ -25,18 +25,7 @@ void ExprTypeCheckCompletionCallback::sawSolution( auto &CS = S.getConstraintSystem(); - // Prefer to get the expected type as the completion expression's contextual - // type. If that fails (because there is no explicit contextual type spelled - // out in the source), the code completion expression will have been - // type-checked to its expected contextual type. - Type ExpectedTy = - CS.getContextualType(CompletionExpr, /*forConstraint=*/false); - if (!ExpectedTy) { - ExpectedTy = S.getResolvedType(CompletionExpr); - } - if (ExpectedTy->hasUnresolvedType()) { - ExpectedTy = Type(); - } + Type ExpectedTy = getTypeForCompletion(S, CompletionExpr); bool ImplicitReturn = isImplicitSingleExpressionReturn(CS, CompletionExpr); diff --git a/lib/IDE/TypeCheckCompletionCallback.cpp b/lib/IDE/TypeCheckCompletionCallback.cpp index 2460d60c71980..c4d754291296e 100644 --- a/lib/IDE/TypeCheckCompletionCallback.cpp +++ b/lib/IDE/TypeCheckCompletionCallback.cpp @@ -47,6 +47,8 @@ Type swift::ide::getTypeForCompletion(const constraints::Solution &S, Expr *E) { auto &CS = S.getConstraintSystem(); + Type Result; + // To aid code completion, we need to attempt to convert type placeholders // back into underlying generic parameters if possible, since type // of the code completion expression is used as "expected" (or contextual) @@ -70,7 +72,7 @@ Type swift::ide::getTypeForCompletion(const constraints::Solution &S, Expr *E) { return type; }); - return S.simplifyType(completionTy.transform([&](Type type) { + Result = S.simplifyType(completionTy.transform([&](Type type) { if (auto *placeholder = type->getAs()) { if (auto *typeVar = placeholder->getOriginator().dyn_cast()) 
{ @@ -88,9 +90,17 @@ Type swift::ide::getTypeForCompletion(const constraints::Solution &S, Expr *E) { return type; })); + } else { + Result = S.getResolvedType(E); } - return S.getResolvedType(E); + if (!Result || Result->is()) { + Result = CS.getContextualType(E, /*forConstraint=*/false); + } + if (Result && Result->is()) { + Result = Type(); + } + return Result; } bool swift::ide::isImplicitSingleExpressionReturn(ConstraintSystem &CS, diff --git a/lib/IDE/UnresolvedMemberCompletion.cpp b/lib/IDE/UnresolvedMemberCompletion.cpp index b90bf8b8dd804..6f1c7b6ca1cfe 100644 --- a/lib/IDE/UnresolvedMemberCompletion.cpp +++ b/lib/IDE/UnresolvedMemberCompletion.cpp @@ -89,7 +89,7 @@ void UnresolvedMemberTypeCheckCompletionCallback::sawSolution( // If the type couldn't be determined (e.g. because there isn't any context // to derive it from), let's not attempt to do a lookup since it wouldn't // produce any useful results anyway. - if (ExpectedTy && !ExpectedTy->is()) { + if (ExpectedTy) { // If ExpectedTy is a duplicate of any other result, ignore this solution. 
if (!llvm::any_of(ExprResults, [&](const ExprResult &R) { return R.ExpectedTy->isEqual(ExpectedTy); diff --git a/test/IDE/complete_assignment.swift b/test/IDE/complete_assignment.swift index b36b537acab86..5eed19944bc99 100644 --- a/test/IDE/complete_assignment.swift +++ b/test/IDE/complete_assignment.swift @@ -202,7 +202,7 @@ func f2() { // ASSIGN_11-DAG: Decl[InstanceMethod]/CurrNominal: IntOpGen()[#Int?#] // ASSIGN_11-DAG: Decl[InstanceMethod]/CurrNominal: D1Gen()[#D1#] // ASSIGN_11-DAG: Decl[InstanceMethod]/CurrNominal: D2Gen()[#D2#] -// ASSIGN_11-DAG: Decl[InstanceMethod]/CurrNominal/TypeRelation[Invalid]: VoidGen()[#Void#] +// ASSIGN_11-DAG: Decl[InstanceMethod]/CurrNominal: VoidGen()[#Void#] // ASSIGN_11-DAG: Decl[InstanceVar]/CurrNominal: InternalC2[#C2#] func f12() { From 03d819f442f05b58f6a2b1816b2e280292db5329 Mon Sep 17 00:00:00 2001 From: Alex Hoppen Date: Sat, 19 Mar 2022 08:46:20 +0100 Subject: [PATCH 22/88] [CodeCompletion] Check whether surrounding context supports async in all solver-based completion kinds --- include/swift/IDE/ArgumentCompletion.h | 10 +- include/swift/IDE/CompletionLookup.h | 3 + include/swift/IDE/DotExprCompletion.h | 11 ++- .../swift/IDE/TypeCheckCompletionCallback.h | 3 + .../swift/IDE/UnresolvedMemberCompletion.h | 16 +++- lib/IDE/ArgumentCompletion.cpp | 7 +- lib/IDE/CodeCompletion.cpp | 9 +- lib/IDE/CompletionLookup.cpp | 92 +++++++++---------- lib/IDE/DotExprCompletion.cpp | 19 ++-- lib/IDE/ExprCompletion.cpp | 22 +---- lib/IDE/TypeCheckCompletionCallback.cpp | 30 ++++++ lib/IDE/UnresolvedMemberCompletion.cpp | 28 ++++-- test/IDE/complete_global_actorisolation.swift | 12 ++- 13 files changed, 167 insertions(+), 95 deletions(-) diff --git a/include/swift/IDE/ArgumentCompletion.h b/include/swift/IDE/ArgumentCompletion.h index 6527f9b4f5b8d..ba3f365d147f6 100644 --- a/include/swift/IDE/ArgumentCompletion.h +++ b/include/swift/IDE/ArgumentCompletion.h @@ -45,9 +45,14 @@ class ArgumentTypeCheckCompletionCallback : public 
TypeCheckCompletionCallback { Type BaseType; /// True if an argument label precedes the completion location. bool HasLabel; + /// Whether the surrounding context is async and thus calling async + /// functions is supported. + bool IsInAsyncContext; }; CodeCompletionExpr *CompletionExpr; + DeclContext *DC; + SmallVector Results; /// Populates a vector of parameters to suggest along with a vector of types @@ -59,8 +64,9 @@ class ArgumentTypeCheckCompletionCallback : public TypeCheckCompletionCallback { SmallVectorImpl &Types); public: - ArgumentTypeCheckCompletionCallback(CodeCompletionExpr *CompletionExpr) - : CompletionExpr(CompletionExpr) {} + ArgumentTypeCheckCompletionCallback(CodeCompletionExpr *CompletionExpr, + DeclContext *DC) + : CompletionExpr(CompletionExpr), DC(DC) {} void sawSolution(const constraints::Solution &solution) override; diff --git a/include/swift/IDE/CompletionLookup.h b/include/swift/IDE/CompletionLookup.h index e7b118c3f0ff4..9dd5237b399af 100644 --- a/include/swift/IDE/CompletionLookup.h +++ b/include/swift/IDE/CompletionLookup.h @@ -76,6 +76,9 @@ bool isCodeCompletionAtTopLevel(const DeclContext *DC); /// } bool isCompletionDeclContextLocalContext(DeclContext *DC); +/// Returns \c true if \p DC can handles async call. +bool canDeclContextHandleAsync(const DeclContext *DC); + /// Return \c true if the completion happens at top-level of a library file. bool isCodeCompletionAtTopLevelOfLibraryFile(const DeclContext *DC); diff --git a/include/swift/IDE/DotExprCompletion.h b/include/swift/IDE/DotExprCompletion.h index 2bf1375fb866a..72e31481b37f4 100644 --- a/include/swift/IDE/DotExprCompletion.h +++ b/include/swift/IDE/DotExprCompletion.h @@ -31,15 +31,22 @@ class DotExprTypeCheckCompletionCallback : public TypeCheckCompletionCallback { bool ExpectsNonVoid; bool BaseIsStaticMetaType; bool IsImplicitSingleExpressionReturn; + + /// Whether the surrounding context is async and thus calling async + /// functions is supported. 
+ bool IsInAsyncContext; }; CodeCompletionExpr *CompletionExpr; + DeclContext *DC; + SmallVector Results; llvm::DenseMap, size_t> BaseToSolutionIdx; public: - DotExprTypeCheckCompletionCallback(CodeCompletionExpr *CompletionExpr) - : CompletionExpr(CompletionExpr) {} + DotExprTypeCheckCompletionCallback(CodeCompletionExpr *CompletionExpr, + DeclContext *DC) + : CompletionExpr(CompletionExpr), DC(DC) {} /// Typecheck the code completion expression in isolation, calling /// \c sawSolution for each solution formed. diff --git a/include/swift/IDE/TypeCheckCompletionCallback.h b/include/swift/IDE/TypeCheckCompletionCallback.h index 02455c1c0b941..de4d762fc0799 100644 --- a/include/swift/IDE/TypeCheckCompletionCallback.h +++ b/include/swift/IDE/TypeCheckCompletionCallback.h @@ -75,6 +75,9 @@ Type getTypeForCompletion(const constraints::Solution &S, Expr *E); bool isImplicitSingleExpressionReturn(constraints::ConstraintSystem &CS, Expr *CompletionExpr); +/// Returns \c true iff the decl context \p DC allows calling async functions. +bool isContextAsync(const constraints::Solution &S, DeclContext *DC); + } // namespace ide } // namespace swift diff --git a/include/swift/IDE/UnresolvedMemberCompletion.h b/include/swift/IDE/UnresolvedMemberCompletion.h index b6d3ea007e7f8..94771a8f2655e 100644 --- a/include/swift/IDE/UnresolvedMemberCompletion.h +++ b/include/swift/IDE/UnresolvedMemberCompletion.h @@ -25,19 +25,25 @@ namespace ide { /// formed during expression type-checking. class UnresolvedMemberTypeCheckCompletionCallback : public TypeCheckCompletionCallback { - struct ExprResult { + struct Result { Type ExpectedTy; bool IsImplicitSingleExpressionReturn; + + /// Whether the surrounding context is async and thus calling async + /// functions is supported. 
+ bool IsInAsyncContext; }; CodeCompletionExpr *CompletionExpr; - SmallVector ExprResults; - SmallVector EnumPatternTypes; + DeclContext *DC; + + SmallVector ExprResults; + SmallVector EnumPatternTypes; public: UnresolvedMemberTypeCheckCompletionCallback( - CodeCompletionExpr *CompletionExpr) - : CompletionExpr(CompletionExpr) {} + CodeCompletionExpr *CompletionExpr, DeclContext *DC) + : CompletionExpr(CompletionExpr), DC(DC) {} void sawSolution(const constraints::Solution &solution) override; diff --git a/lib/IDE/ArgumentCompletion.cpp b/lib/IDE/ArgumentCompletion.cpp index 9887e8e05453b..432177a0dfdbf 100644 --- a/lib/IDE/ArgumentCompletion.cpp +++ b/lib/IDE/ArgumentCompletion.cpp @@ -195,6 +195,8 @@ void ArgumentTypeCheckCompletionCallback::sawSolution(const Solution &S) { } } + bool IsAsync = isContextAsync(S, DC); + // If this is a duplicate of any other result, ignore this solution. if (llvm::any_of(Results, [&](const Result &R) { return R.FuncD == FuncD && nullableTypesEqual(R.FuncTy, FuncTy) && @@ -207,7 +209,7 @@ void ArgumentTypeCheckCompletionCallback::sawSolution(const Solution &S) { Results.push_back({ExpectedTy, isa(ParentCall), FuncD, FuncTy, ArgIdx, ParamIdx, std::move(ClaimedParams), - IsNoninitialVariadic, CallBaseTy, HasLabel}); + IsNoninitialVariadic, CallBaseTy, HasLabel, IsAsync}); } void ArgumentTypeCheckCompletionCallback::deliverResults( @@ -272,6 +274,9 @@ void ArgumentTypeCheckCompletionCallback::deliverResults( ExpectedTypes.push_back(Result.ExpectedType); } Lookup.setExpectedTypes(ExpectedTypes, false); + bool IsInAsyncContext = llvm::any_of( + Results, [](const Result &Res) { return Res.IsInAsyncContext; }); + Lookup.setCanCurrDeclContextHandleAsync(IsInAsyncContext); Lookup.getValueCompletionsInDeclContext(Loc); Lookup.getSelfTypeCompletionInDeclContext(Loc, /*isForDeclResult=*/false); diff --git a/lib/IDE/CodeCompletion.cpp b/lib/IDE/CodeCompletion.cpp index 5505ee3208a8e..1e55bfddbaeb8 100644 --- a/lib/IDE/CodeCompletion.cpp +++ 
b/lib/IDE/CodeCompletion.cpp @@ -1327,7 +1327,8 @@ bool CodeCompletionCallbacksImpl::trySolverCompletion(bool MaybeFuncBody) { assert(CodeCompleteTokenExpr); assert(CurDeclContext); - DotExprTypeCheckCompletionCallback Lookup(CodeCompleteTokenExpr); + DotExprTypeCheckCompletionCallback Lookup(CodeCompleteTokenExpr, + CurDeclContext); llvm::SaveAndRestore CompletionCollector(Context.CompletionCallback, &Lookup); typeCheckContextAt(CurDeclContext, CompletionLoc); @@ -1351,7 +1352,8 @@ bool CodeCompletionCallbacksImpl::trySolverCompletion(bool MaybeFuncBody) { assert(CodeCompleteTokenExpr); assert(CurDeclContext); - UnresolvedMemberTypeCheckCompletionCallback Lookup(CodeCompleteTokenExpr); + UnresolvedMemberTypeCheckCompletionCallback Lookup(CodeCompleteTokenExpr, + CurDeclContext); llvm::SaveAndRestore CompletionCollector(Context.CompletionCallback, &Lookup); typeCheckContextAt(CurDeclContext, CompletionLoc); @@ -1380,7 +1382,8 @@ bool CodeCompletionCallbacksImpl::trySolverCompletion(bool MaybeFuncBody) { case CompletionKind::CallArg: { assert(CodeCompleteTokenExpr); assert(CurDeclContext); - ArgumentTypeCheckCompletionCallback Lookup(CodeCompleteTokenExpr); + ArgumentTypeCheckCompletionCallback Lookup(CodeCompleteTokenExpr, + CurDeclContext); llvm::SaveAndRestore CompletionCollector( Context.CompletionCallback, &Lookup); typeCheckContextAt(CurDeclContext, CompletionLoc); diff --git a/lib/IDE/CompletionLookup.cpp b/lib/IDE/CompletionLookup.cpp index 76019e55dc694..9983fd2f406a1 100644 --- a/lib/IDE/CompletionLookup.cpp +++ b/lib/IDE/CompletionLookup.cpp @@ -34,52 +34,6 @@ static bool SwiftKeyPathFilter(ValueDecl *decl, DeclVisibilityKind) { } } -/// Returns \c true if \p DC can handles async call. -static bool canDeclContextHandleAsync(const DeclContext *DC) { - if (auto *func = dyn_cast(DC)) - return func->isAsyncContext(); - - if (auto *closure = dyn_cast(DC)) { - // See if the closure has 'async' function type. 
- if (auto closureType = closure->getType()) - if (auto fnType = closureType->getAs()) - if (fnType->isAsync()) - return true; - - // If the closure doesn't contain any async call in the body, closure itself - // doesn't have 'async' type even if 'async' closure is expected. - // func foo(fn: () async -> Void) - // foo { } - // In this case, the closure is wrapped with a 'FunctionConversionExpr' - // which has 'async' function type. - struct AsyncClosureChecker : public ASTWalker { - const ClosureExpr *Target; - bool Result = false; - - AsyncClosureChecker(const ClosureExpr *Target) : Target(Target) {} - - std::pair walkToExprPre(Expr *E) override { - if (E == Target) - return {false, E}; - - if (auto conversionExpr = dyn_cast(E)) { - if (conversionExpr->getSubExpr() == Target) { - if (conversionExpr->getType()->is() && - conversionExpr->getType()->castTo()->isAsync()) - Result = true; - return {false, E}; - } - } - return {true, E}; - } - } checker(closure); - closure->getParent()->walkContext(checker); - return checker.Result; - } - - return false; -} - static bool isTopLevelSubcontext(const DeclContext *DC) { for (; DC && DC->isLocalContext(); DC = DC->getParent()) { switch (DC->getContextKind()) { @@ -207,6 +161,52 @@ bool swift::ide::isCompletionDeclContextLocalContext(DeclContext *DC) { return true; } +/// Returns \c true if \p DC can handles async call. +bool swift::ide::canDeclContextHandleAsync(const DeclContext *DC) { + if (auto *func = dyn_cast(DC)) + return func->isAsyncContext(); + + if (auto *closure = dyn_cast(DC)) { + // See if the closure has 'async' function type. + if (auto closureType = closure->getType()) + if (auto fnType = closureType->getAs()) + if (fnType->isAsync()) + return true; + + // If the closure doesn't contain any async call in the body, closure itself + // doesn't have 'async' type even if 'async' closure is expected. 
+ // func foo(fn: () async -> Void) + // foo { } + // In this case, the closure is wrapped with a 'FunctionConversionExpr' + // which has 'async' function type. + struct AsyncClosureChecker : public ASTWalker { + const ClosureExpr *Target; + bool Result = false; + + AsyncClosureChecker(const ClosureExpr *Target) : Target(Target) {} + + std::pair walkToExprPre(Expr *E) override { + if (E == Target) + return {false, E}; + + if (auto conversionExpr = dyn_cast(E)) { + if (conversionExpr->getSubExpr() == Target) { + if (conversionExpr->getType()->is() && + conversionExpr->getType()->castTo()->isAsync()) + Result = true; + return {false, E}; + } + } + return {true, E}; + } + } checker(closure); + closure->getParent()->walkContext(checker); + return checker.Result; + } + + return false; +} + /// Return \c true if the completion happens at top-level of a library file. bool swift::ide::isCodeCompletionAtTopLevelOfLibraryFile( const DeclContext *DC) { diff --git a/lib/IDE/DotExprCompletion.cpp b/lib/IDE/DotExprCompletion.cpp index 7d27071b82121..6865fef97ccdf 100644 --- a/lib/IDE/DotExprCompletion.cpp +++ b/lib/IDE/DotExprCompletion.cpp @@ -69,6 +69,8 @@ void DotExprTypeCheckCompletionCallback::sawSolution( if (auto SelectedOverload = S.getOverloadChoiceIfAvailable(CalleeLocator)) ReferencedDecl = SelectedOverload->choice.getDeclOrNull(); + bool IsAsync = isContextAsync(S, DC); + auto Key = std::make_pair(BaseTy, ReferencedDecl); auto Ret = BaseToSolutionIdx.insert({Key, Results.size()}); if (Ret.second) { @@ -79,15 +81,19 @@ void DotExprTypeCheckCompletionCallback::sawSolution( : !ParentExpr && CS.getContextualTypePurpose( CompletionExpr) != CTP_Unused; - Results.push_back( - {BaseTy, ReferencedDecl, {}, DisallowVoid, ISDMT, ImplicitReturn}); - if (ExpectedTy) + Results.push_back({BaseTy, ReferencedDecl, + /*ExpectedTypes=*/{}, DisallowVoid, ISDMT, + ImplicitReturn, IsAsync}); + if (ExpectedTy) { Results.back().ExpectedTypes.push_back(ExpectedTy); + } } else if (ExpectedTy) 
{ - auto &ExpectedTys = Results[Ret.first->getSecond()].ExpectedTypes; + auto &ExistingResult = Results[Ret.first->getSecond()]; + ExistingResult.IsInAsyncContext |= IsAsync; auto IsEqual = [&](Type Ty) { return ExpectedTy->isEqual(Ty); }; - if (!llvm::any_of(ExpectedTys, IsEqual)) - ExpectedTys.push_back(ExpectedTy); + if (!llvm::any_of(ExistingResult.ExpectedTypes, IsEqual)) { + ExistingResult.ExpectedTypes.push_back(ExpectedTy); + } } } @@ -116,6 +122,7 @@ void DotExprTypeCheckCompletionCallback::deliverResults( Lookup.shouldCheckForDuplicates(Results.size() > 1); for (auto &Result : Results) { + Lookup.setCanCurrDeclContextHandleAsync(Result.IsInAsyncContext); Lookup.setIsStaticMetatype(Result.BaseIsStaticMetaType); Lookup.getPostfixKeywordCompletions(Result.BaseTy, BaseExpr); Lookup.setExpectedTypes(Result.ExpectedTypes, diff --git a/lib/IDE/ExprCompletion.cpp b/lib/IDE/ExprCompletion.cpp index 154f6268970fe..59d4464d7f395 100644 --- a/lib/IDE/ExprCompletion.cpp +++ b/lib/IDE/ExprCompletion.cpp @@ -29,25 +29,7 @@ void ExprTypeCheckCompletionCallback::sawSolution( bool ImplicitReturn = isImplicitSingleExpressionReturn(CS, CompletionExpr); - // We are in an async context if - // - the decl context is async or - // - the decl context is sync but it's used in a context that expectes an - // async function. This happens if the code completion token is in a - // closure that doesn't contain any async calles. Thus the closure is - // type-checked as non-async, but it might get converted to an async - // closure based on its contextual type. 
- bool isAsync = CS.isAsynchronousContext(DC); - if (!isAsync) { - auto target = S.solutionApplicationTargets.find(dyn_cast(DC)); - if (target != S.solutionApplicationTargets.end()) { - if (auto ContextTy = target->second.getClosureContextualType()) { - if (auto ContextFuncTy = - S.simplifyType(ContextTy)->getAs()) { - isAsync = ContextFuncTy->isAsync(); - } - } - } - } + bool IsAsync = isContextAsync(S, DC); llvm::SmallDenseMap SolutionSpecificVarTypes; for (auto NT : S.nodeTypes) { @@ -57,7 +39,7 @@ void ExprTypeCheckCompletionCallback::sawSolution( } Results.push_back( - {ExpectedTy, ImplicitReturn, isAsync, SolutionSpecificVarTypes}); + {ExpectedTy, ImplicitReturn, IsAsync, SolutionSpecificVarTypes}); } void ExprTypeCheckCompletionCallback::deliverResults( diff --git a/lib/IDE/TypeCheckCompletionCallback.cpp b/lib/IDE/TypeCheckCompletionCallback.cpp index c4d754291296e..71478071fbda6 100644 --- a/lib/IDE/TypeCheckCompletionCallback.cpp +++ b/lib/IDE/TypeCheckCompletionCallback.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "swift/IDE/TypeCheckCompletionCallback.h" +#include "swift/IDE/CompletionLookup.h" #include "swift/Sema/CompletionContextFinder.h" #include "swift/Sema/ConstraintSystem.h" #include "swift/Sema/IDETypeChecking.h" @@ -118,3 +119,32 @@ bool swift::ide::isImplicitSingleExpressionReturn(ConstraintSystem &CS, } return false; } + +bool swift::ide::isContextAsync(const constraints::Solution &S, + DeclContext *DC) { + // We are in an async context if + // - the decl context is async + if (S.getConstraintSystem().isAsynchronousContext(DC)) { + return true; + } + + // - the decl context is sync but it's used in a context that expectes an + // async function. This happens if the code completion token is in a + // closure that doesn't contain any async calles. 
Thus the closure is + // type-checked as non-async, but it might get converted to an async + // closure based on its contextual type + auto target = S.solutionApplicationTargets.find(dyn_cast(DC)); + if (target != S.solutionApplicationTargets.end()) { + if (auto ContextTy = target->second.getClosureContextualType()) { + if (auto ContextFuncTy = + S.simplifyType(ContextTy)->getAs()) { + return ContextFuncTy->isAsync(); + } + } + } + + // - we did not record any information about async-ness of the context in the + // solution, but the type information recorded AST declares the context as + // async. + return canDeclContextHandleAsync(DC); +} diff --git a/lib/IDE/UnresolvedMemberCompletion.cpp b/lib/IDE/UnresolvedMemberCompletion.cpp index 6f1c7b6ca1cfe..3b96d2d79eaf0 100644 --- a/lib/IDE/UnresolvedMemberCompletion.cpp +++ b/lib/IDE/UnresolvedMemberCompletion.cpp @@ -86,17 +86,21 @@ void UnresolvedMemberTypeCheckCompletionCallback::sawSolution( auto &CS = S.getConstraintSystem(); Type ExpectedTy = getTypeForCompletion(S, CompletionExpr); + + bool IsAsync = isContextAsync(S, DC); + // If the type couldn't be determined (e.g. because there isn't any context // to derive it from), let's not attempt to do a lookup since it wouldn't // produce any useful results anyway. if (ExpectedTy) { // If ExpectedTy is a duplicate of any other result, ignore this solution. 
- if (!llvm::any_of(ExprResults, [&](const ExprResult &R) { - return R.ExpectedTy->isEqual(ExpectedTy); - })) { + auto IsEqual = [&](const Result &R) { + return R.ExpectedTy->isEqual(ExpectedTy); + }; + if (!llvm::any_of(ExprResults, IsEqual)) { bool SingleExprBody = isImplicitSingleExpressionReturn(CS, CompletionExpr); - ExprResults.push_back({ExpectedTy, SingleExprBody}); + ExprResults.push_back({ExpectedTy, SingleExprBody, IsAsync}); } } @@ -110,10 +114,13 @@ void UnresolvedMemberTypeCheckCompletionCallback::sawSolution( MatchVarType = S.getResolvedType(MatchVar); } if (MatchVarType && !MatchVarType->is()) { - if (!llvm::any_of(EnumPatternTypes, [&](const Type &R) { - return R->isEqual(MatchVarType); - })) { - EnumPatternTypes.push_back(MatchVarType); + auto IsEqual = [&](const Result &R) { + return R.ExpectedTy->isEqual(MatchVarType); + }; + if (!llvm::any_of(EnumPatternTypes, IsEqual)) { + EnumPatternTypes.push_back({MatchVarType, + /*IsImplicitSingleExpressionReturn=*/false, + IsAsync}); } } } @@ -142,6 +149,7 @@ void UnresolvedMemberTypeCheckCompletionCallback::deliverResults( Result.IsImplicitSingleExpressionReturn, /*expectsNonVoid*/ true); Lookup.setIdealExpectedType(Result.ExpectedTy); + Lookup.setCanCurrDeclContextHandleAsync(Result.IsInAsyncContext); // For optional types, also get members of the unwrapped type if it's not // already equivalent to one of the top-level types. Handling it via the top @@ -157,10 +165,12 @@ void UnresolvedMemberTypeCheckCompletionCallback::deliverResults( // Offer completions when interpreting the pattern match as an // EnumElementPattern. 
- for (auto &Ty : EnumPatternTypes) { + for (auto &Result : EnumPatternTypes) { + Type Ty = Result.ExpectedTy; Lookup.setExpectedTypes({Ty}, /*IsImplicitSingleExpressionReturn=*/false, /*expectsNonVoid=*/true); Lookup.setIdealExpectedType(Ty); + Lookup.setCanCurrDeclContextHandleAsync(Result.IsInAsyncContext); // We can pattern match MyEnum against Optional if (Ty->getOptionalObjectType()) { diff --git a/test/IDE/complete_global_actorisolation.swift b/test/IDE/complete_global_actorisolation.swift index c5d85621bec7b..d3c9b96b10b49 100644 --- a/test/IDE/complete_global_actorisolation.swift +++ b/test/IDE/complete_global_actorisolation.swift @@ -162,10 +162,20 @@ extension MyClass { @MyGlobalActor func testInNestedSingleExpressionClosure() { takeClosure { takeClosure { - otherInstanceOfMyClass.#^IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT?check=IN_FUNC_ON_GLOBAL_ACTOR_SELF_DOT^# + otherInstanceOfMyClass.#^IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT^# } } } +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT: Begin completions +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceMethod]/CurrNominal: funcOnGlobalActor()[#Int#]; name=funcOnGlobalActor() +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceMethod]/CurrNominal: funcOnOtherGlobalActor()[' async'][#Int#]; name=funcOnOtherGlobalActor() +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceMethod]/CurrNominal: funcSync()[#Int#]; name=funcSync() +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceMethod]/CurrNominal: nonSenableFuncOnGlobalActor({#arg: MyNonSendable#})[#Int#]; name=nonSenableFuncOnGlobalActor(arg:) +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceMethod]/CurrNominal: nonSenableFuncOnOtherGlobalActor({#arg: MyNonSendable#})[' async'][#Int#]; name=nonSenableFuncOnOtherGlobalActor(arg:) +// 
IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceVar]/CurrNominal: varOnGlobalActor[#Int#]; name=varOnGlobalActor +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceVar]/CurrNominal: varOnOtherGlobalActor[#Int#][' async']; name=varOnOtherGlobalActor +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT-DAG: Decl[InstanceVar]/CurrNominal: varSync[#Int#]; name=varSync +// IN_NESTED_SINGLE_EXPRESSION_CLOSURE_ON_GLBOAL_ACTOR_OTHER_DOT: End completions } actor ActorTests { From cb514646cd42117454a6b077c5cf482e0866d4c1 Mon Sep 17 00:00:00 2001 From: Hamish Knight Date: Mon, 21 Mar 2022 19:26:10 +0000 Subject: [PATCH 23/88] [test] Add some additional regex parsing tests --- test/StringProcessing/Parse/regex.swift | 8 ++++ .../Parse/regex_parse_error.swift | 39 +++++++++++++++++-- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/test/StringProcessing/Parse/regex.swift b/test/StringProcessing/Parse/regex.swift index c4920ce04bb85..1ce36cf57898d 100644 --- a/test/StringProcessing/Parse/regex.swift +++ b/test/StringProcessing/Parse/regex.swift @@ -21,3 +21,11 @@ _ = re're\r\e\'\\' _ = (#/[*/#, #/+]/#, #/.]/#) // expected-error@-1 {{cannot parse regular expression: quantifier '+' must appear after expression}} // expected-error@-2 {{cannot parse regular expression: expected ']'}} + +// Make sure we can skip over `'` characters in the regex body. 
+_ = re'(?'xA0_-y1'x)' +_ = re'(?('xA0_')\')' +_ = re'\'(?('-20'))' +_ = re'\k'+2-1'\'' +_ = re'\g'xA0_'' +_ = re'(?C'9,3, pg(')' diff --git a/test/StringProcessing/Parse/regex_parse_error.swift b/test/StringProcessing/Parse/regex_parse_error.swift index 129f43a48e03e..cfe8358cef85b 100644 --- a/test/StringProcessing/Parse/regex_parse_error.swift +++ b/test/StringProcessing/Parse/regex_parse_error.swift @@ -6,10 +6,43 @@ _ = re'(' // expected-error {{expected ')'}} // FIXME: Should be 'group openings' _ = re')' // expected-error {{closing ')' does not balance any groups openings}} -let s = #/\\/''/ // expected-error {{unterminated regex literal}} +_ = #/\\/''/ // expected-error {{unterminated regex literal}} _ = #|\| // expected-error {{unterminated regex literal}} _ = #// // expected-error {{unterminated regex literal}} _ = re'x // expected-error {{unterminated regex literal}} -// expected-error@+1 {{unterminated regex literal}} -var unterminated = #/xy +_ = #/xy // expected-error {{unterminated regex literal}} + +_ = re'(?' // expected-error {{expected group specifier}} + +_ = re'(?'' // expected-error {{unterminated regex literal}} +// expected-error@-1 {{expected group name}} + +_ = re'(?'abc' // expected-error {{unterminated regex literal}} +// expected-error@-1 {{expected ')'}} + +// TODO: Maybe change "unterminated string literal" to "unterminated single quote"? 
+_ = re'(?'abc ' // expected-error {{unterminated string literal}} +// expected-error@-1 {{expected group specifier}} +// expected-error@-2 {{consecutive statements on a line must be separated by ';'}} + +_ = re'(?'a // expected-error {{expected group specifier}} +// expected-error@-1 {{cannot find 'a' in scope}} +// expected-error@-2 {{consecutive statements on a line must be separated by ';'}} + +_ = re'\(?'abc' // expected-error {{unterminated string literal}} +// expected-error@-1 {{consecutive statements on a line must be separated by ';'}} + + _ = re'\ + ' +// expected-error@-2 {{unterminated regex literal}} +// expected-error@-3 {{expected escape sequence}} +// expected-error@-3 {{unterminated string literal}} + +func foo(_ x: T, _ y: T) {} +foo(re'(?', re'abc') // expected-error {{expected group specifier}} +foo(re'(?C', re'abc') // expected-error {{expected ')'}} + +foo(re'(?'', re'abc') // expected-error {{expected group name}} +// expected-error@-1 {{unterminated string literal}} +// expected-error@-2 {{expected ',' separator}} From 9ab7a822dfb198e3d13d40fe6a63d0d90a037aa7 Mon Sep 17 00:00:00 2001 From: Evan Wilde Date: Tue, 21 Dec 2021 10:43:26 -0800 Subject: [PATCH 24/88] Cleaning up _runAsyncMain a bit I'm making two cleanups here. First, the closure going into `_runAsyncMain` needs to be `@Sendable` or passing it to the task is not safe. This will also result in a warning being emitted. Second, I'm making this @usableFromInline and `internal`. This function is around for legacy reasons, but it's part of the ABI, so we can't pull it out entirely, but we don't want folks using it. 
--- stdlib/public/Concurrency/Task.swift | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stdlib/public/Concurrency/Task.swift b/stdlib/public/Concurrency/Task.swift index 493d6176a786a..623eb74a4b187 100644 --- a/stdlib/public/Concurrency/Task.swift +++ b/stdlib/public/Concurrency/Task.swift @@ -834,7 +834,9 @@ internal func _asyncMainDrainQueue() -> Never internal func _getMainExecutor() -> Builtin.Executor @available(SwiftStdlib 5.1, *) -public func _runAsyncMain(_ asyncFun: @escaping () async throws -> ()) { +@usableFromInline +@preconcurrency +internal func _runAsyncMain(_ asyncFun: @Sendable @escaping () async throws -> ()) { Task.detached { do { #if !os(Windows) From 4700bc7e701a84b5f810373c21ce2943455163cc Mon Sep 17 00:00:00 2001 From: Evan Wilde Date: Mon, 21 Mar 2022 12:59:21 -0700 Subject: [PATCH 25/88] Add _runAsyncMain to api digester list Adding `_runAsyncMain` to the api-digester list since we're adding `@preconcurrency` when we add `@Sendable`. These two should negate each other, but the api-digester isn't quite smart enough to understand that. 
--- test/api-digester/stability-concurrency-abi.test | 1 + 1 file changed, 1 insertion(+) diff --git a/test/api-digester/stability-concurrency-abi.test b/test/api-digester/stability-concurrency-abi.test index 670dd052326e0..87deb228636bd 100644 --- a/test/api-digester/stability-concurrency-abi.test +++ b/test/api-digester/stability-concurrency-abi.test @@ -58,6 +58,7 @@ Func AsyncSequence.map(_:) is now with @preconcurrency Func AsyncSequence.prefix(while:) is now with @preconcurrency Func MainActor.run(resultType:body:) has generic signature change from to Func MainActor.run(resultType:body:) has mangled name changing from 'static Swift.MainActor.run(resultType: A.Type, body: @Swift.MainActor @Sendable () throws -> A) async throws -> A' to 'static Swift.MainActor.run(resultType: A.Type, body: @Swift.MainActor @Sendable () throws -> A) async throws -> A' +Func _runAsyncMain(_:) is now with @preconcurrency Protocol Actor has added inherited protocol AnyActor Protocol Actor has generic signature change from to Struct CheckedContinuation has removed conformance to UnsafeSendable From a1e3afadb2693ae76c74a3929362cbfaf45a22b9 Mon Sep 17 00:00:00 2001 From: Ben Barham Date: Mon, 21 Mar 2022 13:23:27 -0700 Subject: [PATCH 26/88] [Option] Add feature flag for empty ABI descriptors Allow clients to check whether they can force empty ABI descriptors to be output. 
--- lib/Option/features.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/Option/features.json b/lib/Option/features.json index 143099aaaae35..905a3b1fac532 100644 --- a/lib/Option/features.json +++ b/lib/Option/features.json @@ -17,6 +17,9 @@ }, { "name": "no-warn-superfluous-index-unit-path" + }, + { + "name": "empty-abi-descriptor" } ] } From dc82433c78cfaf0f894ab72104648245fd442512 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Mon, 21 Mar 2022 16:29:19 -0400 Subject: [PATCH 27/88] RequirementMachine: Another silly GenericSignatureBuilder compatibility hack for concrete contraction --- .../ConcreteContraction.cpp | 85 ++++++++++++++++++- ...rete_contraction_unrelated_typealias.swift | 48 +++++++++++ 2 files changed, 132 insertions(+), 1 deletion(-) create mode 100644 test/Generics/concrete_contraction_unrelated_typealias.swift diff --git a/lib/AST/RequirementMachine/ConcreteContraction.cpp b/lib/AST/RequirementMachine/ConcreteContraction.cpp index 0ce15268953d8..9a1d3e40e3136 100644 --- a/lib/AST/RequirementMachine/ConcreteContraction.cpp +++ b/lib/AST/RequirementMachine/ConcreteContraction.cpp @@ -173,6 +173,10 @@ class ConcreteContraction { Type substType(Type type) const; Requirement substRequirement(const Requirement &req) const; + bool preserveSameTypeRequirement(const Requirement &req) const; + + bool hasResolvedMemberTypeOfInterestingParameter(Type t) const; + public: ConcreteContraction(bool debug) : Debug(debug) {} @@ -384,6 +388,73 @@ ConcreteContraction::substRequirement(const Requirement &req) const { } } +bool ConcreteContraction:: +hasResolvedMemberTypeOfInterestingParameter(Type type) const { + return type.findIf([&](Type t) -> bool { + if (auto *memberTy = t->getAs()) { + if (memberTy->getAssocType() == nullptr) + return false; + + auto baseTy = memberTy->getBase(); + if (auto *genericParam = baseTy->getAs()) { + GenericParamKey key(genericParam); + + Type concreteType; + { + auto found = ConcreteTypes.find(key); + if (found != 
ConcreteTypes.end() && found->second.size() == 1) + return true; + } + + Type superclass; + { + auto found = Superclasses.find(key); + if (found != Superclasses.end() && found->second.size() == 1) + return true; + } + } + } + + return false; + }); +} + +/// Another silly GenericSignatureBuilder compatibility hack. +/// +/// Consider this code: +/// +/// class C { +/// typealias A = T +/// } +/// +/// protocol P { +/// associatedtype A +/// } +/// +/// func f(_: X, _: T) where X : P, X : C, X.A == T {} +/// +/// The GenericSignatureBuilder would introduce an equivalence between +/// typealias A in class C and associatedtype A in protocol P, so the +/// requirement 'X.A == T' would effectively constrain _both_. +/// +/// Simulate this by keeping both the original and substituted same-type +/// requirement in a narrow case. +bool ConcreteContraction::preserveSameTypeRequirement( + const Requirement &req) const { + if (req.getKind() != RequirementKind::SameType) + return false; + + if (Superclasses.find(req.getFirstType()->getRootGenericParam()) + == Superclasses.end()) + return false; + + if (hasResolvedMemberTypeOfInterestingParameter(req.getFirstType()) || + hasResolvedMemberTypeOfInterestingParameter(req.getSecondType())) + return false; + + return true; +} + /// Substitute all occurrences of generic parameters subject to superclass /// or concrete type requirements with their corresponding superclass or /// concrete type. @@ -506,6 +577,18 @@ bool ConcreteContraction::performConcreteContraction( llvm::dbgs() << "\n"; } + if (preserveSameTypeRequirement(req.req)) { + if (Debug) { + llvm::dbgs() << "@ Preserving original requirement: "; + req.req.dump(llvm::dbgs()); + llvm::dbgs() << "\n"; + } + + // Make the duplicated requirement 'inferred' so that we don't diagnose + // it as redundant. + result.push_back({req.req, SourceLoc(), /*inferred=*/true}); + } + // Substitute the requirement. 
Optional substReq = substRequirement(req.req); @@ -519,7 +602,7 @@ bool ConcreteContraction::performConcreteContraction( llvm::dbgs() << "\n"; } - return false; + continue; } if (Debug) { diff --git a/test/Generics/concrete_contraction_unrelated_typealias.swift b/test/Generics/concrete_contraction_unrelated_typealias.swift new file mode 100644 index 0000000000000..f0d5cdf3d66a4 --- /dev/null +++ b/test/Generics/concrete_contraction_unrelated_typealias.swift @@ -0,0 +1,48 @@ +// R/UN: %target-swift-frontend -typecheck %s -debug-generic-signatures -requirement-machine-inferred-signatures=on 2>&1 | %FileCheck %s +// RUN: %target-swift-frontend -typecheck %s -debug-generic-signatures -requirement-machine-inferred-signatures=on -disable-requirement-machine-concrete-contraction 2>&1 | %FileCheck %s + +// Another GenericSignatureBuilder oddity, reduced from RxSwift. +// +// The requirements 'Proxy.Parent == P' and 'Proxy.Delegate == D' in the +// init() below refer to both the typealias and the associated type, +// despite the class being unrelated to the protocol; it just happens to +// define typealiases with the same name. +// +// In the Requirement Machine, the concrete contraction pre-processing +// pass would eagerly substitute the concrete type into these two +// requirements, producing the useless requirements 'P == P' and 'D == D'. +// +// Make sure concrete contraction keeps these requirements as-is by +// checking the generic signature with and without concrete contraction. + +class GenericDelegateProxy

{ + typealias Parent = P + typealias Delegate = D + + // CHECK-LABEL: .GenericDelegateProxy.init(_:)@ + // CHECK-NEXT: , Proxy : DelegateProxyType> + init(_: Proxy.Type) + where Proxy: GenericDelegateProxy, + Proxy.Parent == P, + Proxy.Delegate == D {} +} + +class SomeClass {} +struct SomeStruct {} + +class ConcreteDelegateProxy { + typealias Parent = SomeClass + typealias Delegate = SomeStruct + + // CHECK-LABEL: .ConcreteDelegateProxy.init(_:_:_:)@ + // CHECK-NEXT: + init(_: P, _: D, _: Proxy.Type) + where Proxy: ConcreteDelegateProxy, + Proxy.Parent == P, + Proxy.Delegate == D {} +} + +protocol DelegateProxyType { + associatedtype Parent : AnyObject + associatedtype Delegate +} \ No newline at end of file From 7bcd13b8998fbc2b16ca68b5347095ff7344cc3a Mon Sep 17 00:00:00 2001 From: Egor Zhdan Date: Fri, 18 Mar 2022 18:09:31 +0000 Subject: [PATCH 28/88] [cxx-interop] Avoid crashing when importing functions that take pointers to dependent types Importing `type_traits` from libstdc++ currently causes a crash on Linux: ``` swift-ide-test: tools/clang/include/clang/AST/TypeNodes.inc:33: clang::TypeInfo clang::ASTContext::getTypeInfoImpl(const clang::Type *) const: Assertion `!T->isDependentType() && "should not see dependent types here"' failed. PLEASE submit a bug report to https://bugs.llvm.org/ and include the crash backtrace. Stack dump: 0. Program arguments: /home/egorzh/Builds/swift/swift/bin/swift-ide-test -print-module -module-to-print=std -source-filename=x -enable-cxx-interop 1. /usr/lib/gcc/x86_64-linux-gnu/9/../../../../include/c++/9/type_traits:1110:10: importing 'std::__do_is_implicitly_default_constructible_impl' 2. /usr/lib/gcc/x86_64-linux-gnu/9/../../../../include/c++/9/type_traits:1116:22: importing 'std::__do_is_implicitly_default_constructible_impl::__test' ``` This change fixes the crash by bailing on such functions. 
--- lib/ClangImporter/ImportType.cpp | 3 +++ .../Cxx/templates/Inputs/function-templates.h | 16 ++++++++++++++++ ...emplate-type-parameter-module-interface.swift | 7 ++++--- .../function-template-module-interface.swift | 4 ++++ 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/lib/ClangImporter/ImportType.cpp b/lib/ClangImporter/ImportType.cpp index c19a50cc1abb6..34c3691d8c17f 100644 --- a/lib/ClangImporter/ImportType.cpp +++ b/lib/ClangImporter/ImportType.cpp @@ -454,6 +454,9 @@ namespace { ImportHint::OtherPointer}; } + if (pointeeQualType->isDependentType()) + return Type(); + // All other C pointers to concrete types map to // UnsafeMutablePointer or OpaquePointer. diff --git a/test/Interop/Cxx/templates/Inputs/function-templates.h b/test/Interop/Cxx/templates/Inputs/function-templates.h index dedb00c3f86b6..b3419ecbc6836 100644 --- a/test/Interop/Cxx/templates/Inputs/function-templates.h +++ b/test/Interop/Cxx/templates/Inputs/function-templates.h @@ -44,6 +44,22 @@ decltype(auto) testAuto(T arg) { return arg; } +template +struct ClassTemplate { + T t; +}; + +template +void takesPointerToDependent(ClassTemplate *ct) { + ct->t++; +} + +template +T usedInDeclType(T) {} + +template +void takesDeclTypePointer(decltype(usedInDeclType()) *) {} + // TODO: Add tests for Decltype, UnaryTransform, and TemplateSpecialization with // a dependent type once those are supported. 
diff --git a/test/Interop/Cxx/templates/defaulted-template-type-parameter-module-interface.swift b/test/Interop/Cxx/templates/defaulted-template-type-parameter-module-interface.swift index 4eb8abc81842e..eadc312cfe703 100644 --- a/test/Interop/Cxx/templates/defaulted-template-type-parameter-module-interface.swift +++ b/test/Interop/Cxx/templates/defaulted-template-type-parameter-module-interface.swift @@ -17,6 +17,7 @@ // CHECK: func defaultedTemplateReferenceTypeParam(_ t: inout T) // The following types aren't imported correctly, but that does not have to do // with the fact that the template type paramaters are defaulted. -// CHECK: func defaultedTemplatePointerTypeParam(_ t: UnsafeMutablePointer) -// CHECK: func defaultedTemplatePointerReferenceTypeParam(_ t: inout OpaquePointer!) -// CHECK: func defaultedTemplatePointerPointerTypeParam(_ t: UnsafeMutablePointer!) +// TODO: reenable the following checks: (rdar://90587703) +// TODO-CHECK: func defaultedTemplatePointerTypeParam(_ t: UnsafeMutablePointer) +// TODO-CHECK: func defaultedTemplatePointerReferenceTypeParam(_ t: inout OpaquePointer!) +// TODO-CHECK: func defaultedTemplatePointerPointerTypeParam(_ t: UnsafeMutablePointer!) diff --git a/test/Interop/Cxx/templates/function-template-module-interface.swift b/test/Interop/Cxx/templates/function-template-module-interface.swift index 4c044966953c9..9e921b226eaa3 100644 --- a/test/Interop/Cxx/templates/function-template-module-interface.swift +++ b/test/Interop/Cxx/templates/function-template-module-interface.swift @@ -14,6 +14,10 @@ // CHECK: mutating func test2(_: Int32, _ varargs: Any...) // CHECK: } +// TODO: import functions that take a pointer to a dependent type (rdar://90587703). 
+// CHECK-NOT: func takesPointerToDependent +// CHECK-NOT: func takesDeclTypePointer + // CHECK: func lvalueReference(_ ref: inout T) // CHECK: func constLvalueReference(_: T) // CHECK: func forwardingReference(_: inout T) From 908161136dc905e402941887535b157c5f6fc935 Mon Sep 17 00:00:00 2001 From: Alejandro Alonso Date: Mon, 21 Mar 2022 13:33:42 -0700 Subject: [PATCH 29/88] Renable StringMemoryTest --- validation-test/stdlib/StringMemoryTest.swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validation-test/stdlib/StringMemoryTest.swift b/validation-test/stdlib/StringMemoryTest.swift index b0f99a235acfb..ada1615d4c117 100644 --- a/validation-test/stdlib/StringMemoryTest.swift +++ b/validation-test/stdlib/StringMemoryTest.swift @@ -7,7 +7,7 @@ // REQUIRES: executable_test // REQUIRES: objc_interop -// REQUIRES: rdar85913190 +// UNSUPPORTED: asan import Foundation From a467958df14520b5c9cb318d276890c319615dcb Mon Sep 17 00:00:00 2001 From: Richard Wei Date: Fri, 18 Mar 2022 16:08:25 -0700 Subject: [PATCH 30/88] Integrate newer swift-experimental-string-processing (50ec05d). 
Friend PR: apple/swift-experimental-string-processing#225 --- stdlib/public/StringProcessing/CMakeLists.txt | 1 + test/StringProcessing/Runtime/regex_basic.swift | 16 ++++++++-------- .../Sema/regex_literal_type_inference.swift | 4 ++-- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/stdlib/public/StringProcessing/CMakeLists.txt b/stdlib/public/StringProcessing/CMakeLists.txt index 7fefef92722d0..2a5a5a5eeaf5f 100644 --- a/stdlib/public/StringProcessing/CMakeLists.txt +++ b/stdlib/public/StringProcessing/CMakeLists.txt @@ -41,6 +41,7 @@ add_swift_target_library(swift_StringProcessing ${SWIFT_STDLIB_LIBRARY_BUILD_TYP -Dswift_StringProcessing_EXPORTS SWIFT_COMPILE_FLAGS ${SWIFT_STANDARD_LIBRARY_SWIFT_FLAGS} + -Xfrontend -enable-experimental-pairwise-build-block LINK_FLAGS "${SWIFT_RUNTIME_SWIFT_LINK_FLAGS}" SWIFT_MODULE_DEPENDS _MatchingEngine diff --git a/test/StringProcessing/Runtime/regex_basic.swift b/test/StringProcessing/Runtime/regex_basic.swift index fb0ee31d32775..697f4b7d886b5 100644 --- a/test/StringProcessing/Runtime/regex_basic.swift +++ b/test/StringProcessing/Runtime/regex_basic.swift @@ -11,7 +11,7 @@ extension String { _ regex: Regex, file: String = #file, line: UInt = #line - ) -> RegexMatch { + ) -> Regex.Match { guard let result = match(regex) else { expectUnreachable("Failed match", file: file, line: line) fatalError() @@ -24,20 +24,20 @@ RegexBasicTests.test("Basic") { let input = "aabccd" let match1 = input.expectMatch(#/aabcc./#) - expectEqual("aabccd", input[match1.range]) - expectTrue("aabccd" == match1.match) + expectEqual("aabccd", match1.0) + expectTrue("aabccd" == match1.output) let match2 = input.expectMatch(#/a*b.+./#) - expectEqual("aabccd", input[match2.range]) - expectTrue("aabccd" == match2.match) + expectEqual("aabccd", match2.0) + expectTrue("aabccd" == match2.output) } RegexBasicTests.test("Modern") { let input = "aabccd" let match1 = input.expectMatch(#|a a bc c /*hello*/ .|#) - expectEqual("aabccd", 
input[match1.range]) - expectTrue("aabccd" == match1.match) + expectEqual("aabccd", match1.0) + expectTrue("aabccd" == match1.output) } RegexBasicTests.test("Captures") { @@ -50,7 +50,7 @@ RegexBasicTests.test("Captures") { let _: Regex<(Substring, Substring, Substring?, Substring)>.Type = type(of: regex) let match1 = input.expectMatch(regex) - expectEqual(input[...], input[match1.range]) + expectEqual(input[...], match1.0) expectTrue(input == match1.0) expectTrue("A6F0" == match1.1) expectTrue("A6F1" == match1.2) diff --git a/test/StringProcessing/Sema/regex_literal_type_inference.swift b/test/StringProcessing/Sema/regex_literal_type_inference.swift index bd645850886a8..ceda3794d3a94 100644 --- a/test/StringProcessing/Sema/regex_literal_type_inference.swift +++ b/test/StringProcessing/Sema/regex_literal_type_inference.swift @@ -4,7 +4,7 @@ let r0 = #/./# let _: Regex = r0 -func takesRegex(_: Regex) {} +func takesRegex(_: Regex) {} takesRegex(#//#) // okay let r1 = #/.(.)/# @@ -14,7 +14,7 @@ let _: Regex<(Substring, Substring)>.Type = type(of: r1) struct S {} // expected-error @+2 {{cannot assign value of type 'Regex<(Substring, Substring)>' to type 'Regex'}} -// expected-note @+1 {{arguments to generic parameter 'Match' ('(Substring, Substring)' and 'S') are expected to be equal}} +// expected-note @+1 {{arguments to generic parameter 'Output' ('(Substring, Substring)' and 'S') are expected to be equal}} let r2: Regex = #/.(.)/# let r3 = #/(.)(.)/# From 056132cca0c47ff8d411f0dbc49791d4a8d6242e Mon Sep 17 00:00:00 2001 From: Michael Gottesman Date: Mon, 31 Jan 2022 21:32:28 -0800 Subject: [PATCH 31/88] [move-function] Add a new pass that propagates debug_value [moved] into coroutine func-lets. NOTE: debug_value [moved] appearing in the source code implies a _move was used. So this will not effect current stable swift code. This is just a first version of this that I am using to commit/bring up tests for IRGen supporting a full dataflow version of this patch. 
Big picture is that there is a bunch of work that is done in the LLVM level in the coroutine splitter to work around communicating live variables in the various coroutine func-lets. This logic is all done with debug.declare and we would need to update that logic in the coroutine splitter to handle debug.addr. Rather than do this, after some conversation, AdrianP and I realized that we could get the same effect of a debug.declare by just redeclaring the current live set of debug_value after each possible coroutine funclet start. To do this in full generality, we need a full dataflow but just to bring this up we initially perform a dominance propagation algorithm of the following sort: 1. We walk the CFG along successors. By doing this we guarantee that we visit blocks after their dominators. 2. When we visit a block, we walk the block from start->end. During this walk: a. We grab a new block state from the centralized block->blockState map. This state is a [SILDebugVariable : DebugValueInst]. b. If we see a debug_value, we map blockState[debug_value.getDbgVar()] = debug_value. This ensures that when we get to the bottom of the block, we have pairs of SILDebugVariable + last debug_value on it. c. If we see any coroutine funclet boundaries, we clone the current tracked set of our block state and then walk up the dom tree dumping in each block any debug_value with a SILDebugVariable that we have not already dumped. This is maintained by using a visited set of SILDebugVariable for each funclet boundary. The end result is that at the beginning of each funclet we will basically declare the debug info for an addr. This is insufficient of course for moves that are in conditional control flow, e.x.: ``` let x = Klass() if boolValue { await asyncCall() let _ = _move(x) } ``` but this at least lets me begin to write tests for this in lldb using straight line code and work out the rest of the issues in CodeGen using those tests. 
--- include/swift/SIL/SILDebugInfoExpression.h | 13 + include/swift/SIL/SILDebugVariable.h | 41 ++- include/swift/SIL/SILInstruction.h | 13 + include/swift/SIL/SILLocation.h | 13 + .../swift/SILOptimizer/PassManager/Passes.def | 2 + lib/SILOptimizer/Mandatory/CMakeLists.txt | 1 + .../Mandatory/DebugInfoCanonicalizer.cpp | 343 ++++++++++++++++++ lib/SILOptimizer/PassManager/PassPipeline.cpp | 6 +- test/SILOptimizer/debuginfo_canonicalizer.sil | 137 +++++++ test/sil-passpipeline-dump/basic.test-sh | 3 +- 10 files changed, 565 insertions(+), 7 deletions(-) create mode 100644 lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp create mode 100644 test/SILOptimizer/debuginfo_canonicalizer.sil diff --git a/include/swift/SIL/SILDebugInfoExpression.h b/include/swift/SIL/SILDebugInfoExpression.h index ea9639b577192..fc6a2a27d1c1b 100644 --- a/include/swift/SIL/SILDebugInfoExpression.h +++ b/include/swift/SIL/SILDebugInfoExpression.h @@ -20,6 +20,7 @@ #include "swift/AST/Decl.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/Hashing.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/raw_ostream.h" @@ -109,6 +110,12 @@ struct SILDIExprElement { } }; +/// Returns the hashcode for the di expr element. +inline llvm::hash_code hash_value(const SILDIExprElement &elt) { + return llvm::hash_combine(elt.getKind(), elt.getAsDecl(), elt.getAsDecl(), + elt.getAsConstInt()); +} + /// For a given SILDIExprOperator, provides information /// like its textual name and operand types. struct SILDIExprInfo { @@ -273,5 +280,11 @@ class SILDebugInfoExpression { SILDIExprOperator::Fragment; } }; + +/// Returns the hashcode for the di expr element. 
+inline llvm::hash_code hash_value(const SILDebugInfoExpression &elt) { + return llvm::hash_combine_range(elt.element_begin(), elt.element_end()); +} + } // end namespace swift #endif diff --git a/include/swift/SIL/SILDebugVariable.h b/include/swift/SIL/SILDebugVariable.h index a91111926d35e..bf6671b0cb82f 100644 --- a/include/swift/SIL/SILDebugVariable.h +++ b/include/swift/SIL/SILDebugVariable.h @@ -23,14 +23,23 @@ namespace swift { class AllocationInst; +/// Holds common debug information about local variables and function +/// arguments that are needed by DebugValueInst, AllocStackInst, +/// and AllocBoxInst. +struct SILDebugVariable; +inline llvm::hash_code hash_value(const SILDebugVariable &P); + /// Holds common debug information about local variables and function /// arguments that are needed by DebugValueInst, AllocStackInst, /// and AllocBoxInst. struct SILDebugVariable { + friend llvm::hash_code hash_value(const SILDebugVariable &P); + StringRef Name; unsigned ArgNo : 16; unsigned Constant : 1; unsigned Implicit : 1; + unsigned isDenseMapSingleton : 2; Optional Type; Optional Loc; const SILDebugScope *Scope; @@ -40,26 +49,41 @@ struct SILDebugVariable { SILDebugVariable(const SILDebugVariable &) = default; SILDebugVariable &operator=(const SILDebugVariable &) = default; + enum class IsDenseMapSingleton { No, IsEmpty, IsTombstone }; + SILDebugVariable(IsDenseMapSingleton inputIsDenseMapSingleton) + : SILDebugVariable() { + assert(inputIsDenseMapSingleton != IsDenseMapSingleton::No && + "Should only pass IsEmpty or IsTombstone"); + isDenseMapSingleton = unsigned(inputIsDenseMapSingleton); + } + SILDebugVariable() - : ArgNo(0), Constant(false), Implicit(false), Scope(nullptr) {} + : ArgNo(0), Constant(false), Implicit(false), isDenseMapSingleton(0), + Scope(nullptr) {} SILDebugVariable(bool Constant, uint16_t ArgNo) - : ArgNo(ArgNo), Constant(Constant), Implicit(false), Scope(nullptr) {} + : ArgNo(ArgNo), Constant(Constant), Implicit(false), + 
isDenseMapSingleton(0), Scope(nullptr) {} SILDebugVariable(StringRef Name, bool Constant, unsigned ArgNo, bool IsImplicit = false, Optional AuxType = {}, Optional DeclLoc = {}, const SILDebugScope *DeclScope = nullptr, llvm::ArrayRef ExprElements = {}) : Name(Name), ArgNo(ArgNo), Constant(Constant), Implicit(IsImplicit), - Type(AuxType), Loc(DeclLoc), Scope(DeclScope), DIExpr(ExprElements) {} + isDenseMapSingleton(0), Type(AuxType), Loc(DeclLoc), Scope(DeclScope), + DIExpr(ExprElements) {} /// Created from either AllocStack or AllocBox instruction static Optional createFromAllocation(const AllocationInst *AI); - bool operator==(const SILDebugVariable &V) { + // We're not comparing DIExpr here because strictly speaking, + // DIExpr is not part of the debug variable. We simply piggyback + // it in this class so that's it's easier to carry DIExpr around. + bool operator==(const SILDebugVariable &V) const { return ArgNo == V.ArgNo && Constant == V.Constant && Name == V.Name && Implicit == V.Implicit && Type == V.Type && Loc == V.Loc && - Scope == V.Scope && DIExpr == V.DIExpr; + Scope == V.Scope && isDenseMapSingleton == V.isDenseMapSingleton && + DIExpr == V.DIExpr; } SILDebugVariable withoutDIExpr() const { @@ -73,6 +97,13 @@ struct SILDebugVariable { bool isVar() const { return Name.size() && !Constant; } }; +/// Returns the hashcode for the new projection path. 
+inline llvm::hash_code hash_value(const SILDebugVariable &P) { + return llvm::hash_combine(P.ArgNo, P.Constant, P.Name, P.Implicit, + P.isDenseMapSingleton, P.Type, P.Loc, P.Scope, + P.DIExpr); +} + } // namespace swift #endif diff --git a/include/swift/SIL/SILInstruction.h b/include/swift/SIL/SILInstruction.h index ab4fc6fe38176..29dac91d93ff6 100644 --- a/include/swift/SIL/SILInstruction.h +++ b/include/swift/SIL/SILInstruction.h @@ -9863,6 +9863,19 @@ struct ilist_traits<::swift::SILInstruction> : void createNode(const SILInstruction &); }; +template <> +struct DenseMapInfo { + using KeyTy = swift::SILDebugVariable; + static inline KeyTy getEmptyKey() { + return KeyTy(KeyTy::IsDenseMapSingleton::IsEmpty); + } + static inline KeyTy getTombstoneKey() { + return KeyTy(KeyTy::IsDenseMapSingleton::IsTombstone); + } + static unsigned getHashValue(const KeyTy &Val) { return hash_value(Val); } + static bool isEqual(const KeyTy &LHS, const KeyTy &RHS) { return LHS == RHS; } +}; + } // end llvm namespace #endif diff --git a/include/swift/SIL/SILLocation.h b/include/swift/SIL/SILLocation.h index 6f4fe02bfc694..180b368a33cfc 100644 --- a/include/swift/SIL/SILLocation.h +++ b/include/swift/SIL/SILLocation.h @@ -72,8 +72,10 @@ class SILLocation { return line == rhs.line && column == rhs.column && filename.equals(rhs.filename); } + void dump() const; void print(raw_ostream &OS) const; + friend llvm::hash_code hash_value(const FilenameAndLocation &); }; protected: @@ -431,8 +433,19 @@ class SILLocation { } inline bool operator!=(const SILLocation &R) const { return !(*this == R); } + + friend llvm::hash_code hash_value(const SILLocation &); }; +inline llvm::hash_code hash_value(const SILLocation &R) { + return llvm::hash_combine(R.kindAndFlags.packedKindAndFlags, + *R.storage.filePositionLoc); +} + +inline llvm::hash_code hash_value(const SILLocation::FilenameAndLocation &R) { + return llvm::hash_combine(R.line, R.column, R.filename); +} + /// Allowed on any instruction. 
class RegularLocation : public SILLocation { public: diff --git a/include/swift/SILOptimizer/PassManager/Passes.def b/include/swift/SILOptimizer/PassManager/Passes.def index 1b98211ba9689..9c29da7b09a6d 100644 --- a/include/swift/SILOptimizer/PassManager/Passes.def +++ b/include/swift/SILOptimizer/PassManager/Passes.def @@ -436,6 +436,8 @@ PASS(MoveKillsCopyableAddressesChecker, "sil-move-kills-copyable-addresses-check PASS(MoveFunctionCanonicalization, "sil-move-function-canon", "Pass that canonicalizes certain parts of the IR before we perform move " "function checking.") +PASS(DebugInfoCanonicalizer, "sil-onone-debuginfo-canonicalizer", + "Canonicalize debug info at -Onone by propagating debug info into coroutine funclets") PASS(PruneVTables, "prune-vtables", "Mark class methods that do not require vtable dispatch") PASS_RANGE(AllPasses, AADumper, PruneVTables) diff --git a/lib/SILOptimizer/Mandatory/CMakeLists.txt b/lib/SILOptimizer/Mandatory/CMakeLists.txt index 927259804c56d..97b7e28b571a7 100644 --- a/lib/SILOptimizer/Mandatory/CMakeLists.txt +++ b/lib/SILOptimizer/Mandatory/CMakeLists.txt @@ -5,6 +5,7 @@ target_sources(swiftSILOptimizer PRIVATE CapturePromotion.cpp ClosureLifetimeFixup.cpp ConstantPropagation.cpp + DebugInfoCanonicalizer.cpp DefiniteInitialization.cpp DIMemoryUseCollector.cpp DataflowDiagnostics.cpp diff --git a/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp b/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp new file mode 100644 index 0000000000000..8fdc0fcbd227b --- /dev/null +++ b/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp @@ -0,0 +1,343 @@ +//===--- DebugInfoCanonicalizer.cpp ---------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2021 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +/// +/// This file contains transformations that propagate debug info at the SIL +/// level to make IRGen's job easier. The specific transformations that we +/// perform is that we clone dominating debug_value for a specific +/// SILDebugVariable after all coroutine-func-let boundary instructions. This in +/// practice this as an algorithm works as follows: +/// +/// 1. We walk the CFG along successors. By doing this we guarantee that we +/// visit +/// blocks after their dominators. +/// +/// 2. When we visit a block, we walk the block from start->end. During this +/// walk: +/// +/// a. We grab a new block state from the centralized block->blockState map. +/// This +/// state is a [SILDebugVariable : DebugValueInst]. +/// +/// b. If we see a debug_value, we map blockState[debug_value.getDbgVar()] = +/// debug_value. This ensures that when we get to the bottom of the block, +/// we have pairs of SILDebugVariable + last debug_value on it. +/// +/// c. If we see any coroutine funclet boundaries, we clone the current +/// tracked +/// set of our block state and then walk up the dom tree dumping in each +/// block any debug_value with a SILDebugVariable that we have not already +/// dumped. This is maintained by using a visited set of SILDebugVariable +/// for each funclet boundary. +/// +/// The end result is that at the beginning of each funclet we will basically +/// declare the debug info for an addr. 
+/// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "sil-onone-debuginfo-canonicalizer" + +#include "swift/Basic/Defer.h" +#include "swift/Basic/FrozenMultiMap.h" +#include "swift/SIL/ApplySite.h" +#include "swift/SIL/BasicBlockBits.h" +#include "swift/SIL/BasicBlockDatastructures.h" +#include "swift/SIL/SILBuilder.h" +#include "swift/SIL/SILInstruction.h" +#include "swift/SIL/SILUndef.h" +#include "swift/SILOptimizer/Analysis/DominanceAnalysis.h" +#include "swift/SILOptimizer/Analysis/PostOrderAnalysis.h" +#include "swift/SILOptimizer/PassManager/Passes.h" +#include "swift/SILOptimizer/PassManager/Transforms.h" +#include "swift/SILOptimizer/Utils/CFGOptUtils.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/SmallBitVector.h" +#include "llvm/ADT/SmallSet.h" + +using namespace swift; + +//===----------------------------------------------------------------------===// +// Utility +//===----------------------------------------------------------------------===// + +static SILInstruction *cloneDebugValue(DebugValueInst *original, + SILInstruction *insertPt) { + SILBuilderWithScope builder(std::next(insertPt->getIterator())); + builder.setCurrentDebugScope(original->getDebugScope()); + return builder.createDebugValue(original->getLoc(), original->getOperand(), + *original->getVarInfo(), false, + true /*was moved*/); +} + +static SILInstruction *cloneDebugValue(DebugValueInst *original, + SILBasicBlock *block) { + SILBuilderWithScope builder(&block->front()); + builder.setCurrentDebugScope(original->getDebugScope()); + return builder.createDebugValue(original->getLoc(), original->getOperand(), + *original->getVarInfo(), false, + true /*was moved*/); +} + +//===----------------------------------------------------------------------===// +// Implementation +//===----------------------------------------------------------------------===// + +namespace { + +struct BlockState { + llvm::SmallMapVector debugValues; +}; 
+ +struct DebugInfoCanonicalizer { + SILFunction *fn; + DominanceAnalysis *da; + DominanceInfo *dt; + llvm::MapVector blockToBlockState; + + DebugInfoCanonicalizer(SILFunction *fn, DominanceAnalysis *da) + : fn(fn), da(da), dt(nullptr) {} + + // We only need the dominance info if we actually see a funclet boundary. So + // make this lazy so we only create the dom tree in functions that actually + // use coroutines. + DominanceInfo *getDominance() { + if (!dt) + dt = da->get(fn); + return dt; + } + + bool process(); + + /// NOTE: insertPt->getParent() may not equal startBlock! This is b/c if we + /// are propagating from a yield, we want to begin in the yields block, not + /// the yield's insertion point successor block. + bool propagateDebugValuesFromDominators( + PointerUnion insertPt, + SILBasicBlock *startBlock, + llvm::SmallDenseSet &seenDebugVars) { + LLVM_DEBUG(llvm::dbgs() << "==> PROPAGATING VALUE\n"); + if (insertPt.is()) { + LLVM_DEBUG(llvm::dbgs() << "Inst: " << *insertPt.get()); + } + + auto *dt = getDominance(); + auto *domTreeNode = dt->getNode(startBlock); + auto *rootNode = dt->getRootNode(); + if (domTreeNode == rootNode) { + LLVM_DEBUG(llvm::dbgs() << "Root node! Nothing to propagate!\n"); + return false; + } + + LLVM_DEBUG(llvm::dbgs() + << "Root Node: " << rootNode->getBlock()->getDebugID() << '\n'); + + // We already emitted in our caller all debug_value needed from the block we + // were processing. We just need to walk up the dominator tree until we + // process the root node. + bool madeChange = false; + do { + domTreeNode = domTreeNode->getIDom(); + LLVM_DEBUG(llvm::dbgs() << "Visiting idom: " + << domTreeNode->getBlock()->getDebugID() << '\n'); + auto &domBlockState = blockToBlockState[domTreeNode->getBlock()]; + for (auto &pred : domBlockState.debugValues) { + // If we see a nullptr, we had a SILUndef. Do not clone, but mark this + // as a debug var we have seen so if it is again defined in previous + // blocks, we don't clone. 
+ if (!pred.second) { + seenDebugVars.insert(pred.first); + continue; + } + + LLVM_DEBUG(llvm::dbgs() << "Has DebugValue: " << *pred.second); + + // If we have already inserted something for this debug_value, + // continue. + if (!seenDebugVars.insert(pred.first).second) { + LLVM_DEBUG(llvm::dbgs() << "Already seen this one... skipping!\n"); + continue; + } + + // Otherwise do the clone. + LLVM_DEBUG(llvm::dbgs() << "Haven't seen this one... cloning!\n"); + if (auto *inst = insertPt.dyn_cast()) { + cloneDebugValue(pred.second, inst); + } else { + cloneDebugValue(pred.second, insertPt.get()); + } + + madeChange = true; + } + } while (domTreeNode != rootNode); + + return madeChange; + } +}; + +} // namespace + +bool DebugInfoCanonicalizer::process() { + bool madeChange = false; + + // We walk along successor edges depth first. This guarantees that we will + // visit any dominator of a specific block before we visit that block since + // any path to the block along successors by definition of dominators we must + // go through all such dominators. + BasicBlockWorklist worklist(&*fn->begin()); + llvm::SmallDenseSet seenDebugVars; + + while (auto *block = worklist.pop()) { + LLVM_DEBUG(llvm::dbgs() + << "BB: Visiting. bb" << block->getDebugID() << '\n'); + auto &state = blockToBlockState[block]; + + // Then for each inst in the block... + for (auto &inst : *block) { + LLVM_DEBUG(llvm::dbgs() << " Inst: " << inst); + // If we have a debug_value that was moved, store state for it. + if (auto *dvi = dyn_cast(&inst)) { + if (!dvi->getWasMoved()) + continue; + + LLVM_DEBUG(llvm::dbgs() << " Found DebugValueInst!\n"); + auto debugInfo = dvi->getVarInfo(); + if (!debugInfo) { + LLVM_DEBUG(llvm::dbgs() << " Has no var info?! Skipping!\n"); + continue; + } + + // If we have a SILUndef, mark this debug info as being mapped to + // nullptr. 
+ if (isa(dvi->getOperand())) { + LLVM_DEBUG(llvm::dbgs() << " SILUndef.\n"); + auto iter = state.debugValues.insert({*debugInfo, nullptr}); + if (!iter.second) + iter.first->second = nullptr; + continue; + } + + // Otherwise, we may have a new debug_value to track. Try to begin + // tracking it... + auto iter = state.debugValues.insert({*debugInfo, dvi}); + + // If we already have one, we failed to insert... So update the iter + // by hand. We track the last instance always. + if (!iter.second) { + iter.first->second = dvi; + } + LLVM_DEBUG(llvm::dbgs() << " ==> Updated Map.\n"); + continue; + } + + // Otherwise, check if we have a coroutine boundary non-terminator + // instruction. If we do, we just dump the relevant debug_value right + // afterwards. + auto shouldHandleNonTermInst = [](SILInstruction *inst) -> bool { + // This handles begin_apply. + if (auto fas = FullApplySite::isa(inst)) { + if (fas.beginsCoroutineEvaluation() || fas.isAsync()) + return true; + } + if (isa(inst)) + return true; + if (isa(inst) || isa(inst)) + return true; + return false; + }; + if (shouldHandleNonTermInst(&inst)) { + LLVM_DEBUG(llvm::dbgs() << " Found apply edge!.\n"); + // Clone all of the debug_values that we are currently tracking both + // after the begin_apply, + SWIFT_DEFER { seenDebugVars.clear(); }; + + for (auto &pred : state.debugValues) { + // If we found a SILUndef, mark this debug var as seen but do not + // clone. + if (!pred.second) { + seenDebugVars.insert(pred.first); + continue; + } + + cloneDebugValue(pred.second, &inst); + // Inside our block, we know that we do not have any repeats since we + // always track the last debug var. + seenDebugVars.insert(pred.first); + madeChange = true; + } + + // Then walk up the idoms until we reach the entry searching for + // seenDebugVars. + madeChange |= propagateDebugValuesFromDominators( + &inst, inst.getParent(), seenDebugVars); + continue; + } + + // Otherwise, we have a yield. 
 We handle this separately since we need to + // insert the debug_value into its successor blocks. + if (auto *yi = dyn_cast<YieldInst>(&inst)) { + LLVM_DEBUG(llvm::dbgs() << " Found Yield: " << *yi); + + SWIFT_DEFER { seenDebugVars.clear(); }; + + // Duplicate all of our tracked debug values into our successor + // blocks. + for (auto *succBlock : yi->getSuccessorBlocks()) { + LLVM_DEBUG(llvm::dbgs() << " Visiting Succ: bb" + << succBlock->getDebugID() << '\n'); + for (auto &pred : state.debugValues) { + if (!pred.second) + continue; + LLVM_DEBUG(llvm::dbgs() << " Cloning: " << *pred.second); + cloneDebugValue(pred.second, succBlock); + madeChange = true; + } + + // We start our dataflow in yi, not in inst, even though we use inst + // as the insert pt. This is b/c inst is in the successor block we + // haven't processed yet so we would emit any debug_value in the + // yield's own block twice. + madeChange |= propagateDebugValuesFromDominators( + succBlock, yi->getParent(), seenDebugVars); + } + } + } + + // Now add the block's successor to the worklist if we haven't visited them + // yet. 
+ for (auto *succBlock : block->getSuccessorBlocks()) + worklist.pushIfNotVisited(succBlock); + } + + return madeChange; +} + +//===----------------------------------------------------------------------===// +// Top Level Entrypoint +//===----------------------------------------------------------------------===// + +namespace { + +class DebugInfoCanonicalizerTransform : public SILFunctionTransform { + void run() override { + DebugInfoCanonicalizer canonicalizer(getFunction(), + getAnalysis()); + if (canonicalizer.process()) { + invalidateAnalysis( + SILAnalysis::InvalidationKind::BranchesAndInstructions); + } + } +}; + +} // end anonymous namespace + +SILTransform *swift::createDebugInfoCanonicalizer() { + return new DebugInfoCanonicalizerTransform(); +} diff --git a/lib/SILOptimizer/PassManager/PassPipeline.cpp b/lib/SILOptimizer/PassManager/PassPipeline.cpp index 13eb1599d5601..137eef753ec29 100644 --- a/lib/SILOptimizer/PassManager/PassPipeline.cpp +++ b/lib/SILOptimizer/PassManager/PassPipeline.cpp @@ -19,11 +19,12 @@ /// //===----------------------------------------------------------------------===// -#include "swift/AST/SILOptions.h" #define DEBUG_TYPE "sil-passpipeline-plan" + #include "swift/SILOptimizer/PassManager/PassPipeline.h" #include "swift/AST/ASTContext.h" #include "swift/AST/Module.h" +#include "swift/AST/SILOptions.h" #include "swift/SIL/SILModule.h" #include "swift/SILOptimizer/Analysis/Analysis.h" #include "swift/SILOptimizer/PassManager/Passes.h" @@ -925,6 +926,9 @@ SILPassPipelinePlan::getOnonePassPipeline(const SILOptions &Options) { P.startPipeline("Serialization"); P.addSerializeSILPass(); + // Fix up debug info by propagating dbg_values. + P.addDebugInfoCanonicalizer(); + // Now strip any transparent functions that still have ownership. 
P.addOwnershipModelEliminator(); diff --git a/test/SILOptimizer/debuginfo_canonicalizer.sil b/test/SILOptimizer/debuginfo_canonicalizer.sil new file mode 100644 index 0000000000000..06a59faa104ff --- /dev/null +++ b/test/SILOptimizer/debuginfo_canonicalizer.sil @@ -0,0 +1,137 @@ +// RUN: %target-sil-opt -sil-onone-debuginfo-canonicalizer -enable-sil-verify-all %s 2>&1 | %FileCheck %s + +sil_stage canonical + +import Builtin + +struct Int64 { + var value: Builtin.Int64 +} + +class Klass { + var value: Int64 +} + +// Since we only take the last debug_value associated with a SILDebugVariable, +// we only should see someVar for debug_value %2. +// +// CHECK-LABEL: sil @yieldOnceCoroutine : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 { +// CHECK: bb0([[ARG:%.*]] : $Klass): +// CHECK-NEXT: debug_value [moved] [[ARG]] : $Klass, let, name "someVar" +// CHECK-NEXT: [[ADDR:%.*]] = ref_element_addr [[ARG]] : $Klass +// CHECK-NEXT: yield [[ADDR]] : $*Int64, resume bb1, unwind bb2 +// +// CHECK: bb1: +// CHECK-NEXT: debug_value [moved] [[ARG]] : $Klass, let, name "someVar" +// CHECK-NEXT: tuple +// CHECK-NEXT: return +// +// CHECK: bb2: +// CHECK-NEXT: debug_value [moved] [[ARG]] : $Klass, let, name "someVar" +// CHECK-NEXT: unwind +// CHECK: } // end sil function 'yieldOnceCoroutine' +sil @yieldOnceCoroutine : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 { +bb0(%0 : $Klass): + debug_value [moved] %0 : $Klass, let, name "someVar" + %1 = ref_element_addr %0 : $Klass, #Klass.value + yield %1 : $*Int64, resume bb1, unwind bb2 + +bb1: + %9999 = tuple() + return %9999 : $() + +bb2: + unwind +} + +// CHECK-LABEL: sil @testSimple : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK: bb0([[ARG:%.*]] : +// CHECK: debug_value [moved] [[ARG]] +// CHECK: begin_apply +// CHECK-NEXT: debug_value [moved] [[ARG]] +// CHECK: end_apply +// CHECK-NEXT: debug_value [moved] [[ARG]] +// CHECK: } // end sil function 'testSimple' 
+sil @testSimple : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : $Klass): + debug_value [moved] %0 : $Klass, let, name "arg" + %f = function_ref @yieldOnceCoroutine : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 + (%3, %4) = begin_apply %f(%0) : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 + %9999 = tuple() + end_apply %4 + return %9999 : $() +} + +// CHECK-LABEL: sil @testDiamond : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK: bb0([[ARG:%.*]] : +// CHECK: debug_value [moved] [[ARG]] +// CHECK: begin_apply +// CHECK-NEXT: debug_value [moved] [[ARG]] +// CHECK-NEXT: cond_br undef, [[BB_LHS:bb[0-9]+]], [[BB_RHS:bb[0-9]+]] +// +// CHECK: [[BB_LHS]]: +// CHECK: end_apply +// CHECK-NEXT: debug_value [moved] [[ARG]] +// CHECK-NEXT: br [[BB_CONT:bb[0-9]+]] +// +// CHECK: [[BB_RHS]]: +// CHECK: abort_apply +// CHECK-NEXT: debug_value [moved] [[ARG]] +// CHECK: } // end sil function 'testDiamond' +sil @testDiamond : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : $Klass): + debug_value [moved] %0 : $Klass, let, name "arg" + %f = function_ref @yieldOnceCoroutine : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 + (%3, %token) = begin_apply %f(%0) : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 + cond_br undef, bb1, bb2 + +bb1: + end_apply %token + br bb3 + +bb2: + abort_apply %token + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil @testUndefDiamond : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK: bb0([[ARG:%.*]] : +// CHECK: debug_value [moved] [[ARG]] +// CHECK: begin_apply +// CHECK-NEXT: debug_value [moved] [[ARG]] +// CHECK-NEXT: cond_br undef, [[BB_LHS:bb[0-9]+]], [[BB_RHS:bb[0-9]+]] +// +// CHECK: [[BB_LHS]]: +// CHECK: end_apply +// CHECK-NEXT: debug_value [moved] [[ARG]] +// CHECK-NEXT: br [[BB_CONT:bb[0-9]+]] +// +// CHECK: [[BB_RHS]]: +// CHECK-NEXT: debug_value 
[moved] undef +// CHECK-NEXT: abort_apply +// CHECK-NEXT: br [[BB_CONT]] +// CHECK: } // end sil function 'testUndefDiamond' +sil @testUndefDiamond : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : $Klass): + debug_value [moved] %0 : $Klass, let, name "arg" + %f = function_ref @yieldOnceCoroutine : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 + (%3, %token) = begin_apply %f(%0) : $@yield_once @convention(method) (@guaranteed Klass) -> @yields @inout Int64 + cond_br undef, bb1, bb2 + +bb1: + end_apply %token + br bb3 + +bb2: + debug_value [moved] undef : $Klass, let, name "arg" + abort_apply %token + br bb3 + +bb3: + %9999 = tuple() + return %9999 : $() +} diff --git a/test/sil-passpipeline-dump/basic.test-sh b/test/sil-passpipeline-dump/basic.test-sh index 1aab22a8fce32..b2f196fd0b4da 100644 --- a/test/sil-passpipeline-dump/basic.test-sh +++ b/test/sil-passpipeline-dump/basic.test-sh @@ -6,7 +6,8 @@ // CHECK: "mandatory-arc-opts" ] // CHECK: --- // CHECK: name: Serialization -// CHECK: passes: [ "serialize-sil", "ownership-model-eliminator" ] +// CHECK: passes: [ "serialize-sil", "sil-onone-debuginfo-canonicalizer", +// CHECK-NEXT: "ownership-model-eliminator" ] // CHECK: --- // CHECK: name: Rest of Onone // CHECK: passes: [ "use-prespecialized", "onone-prespecializer", "sil-debuginfo-gen" ] From e1c4e1045f58701383e6da44508470c0f50167d4 Mon Sep 17 00:00:00 2001 From: Michael Gottesman Date: Fri, 25 Feb 2022 14:18:09 -0800 Subject: [PATCH 32/88] [move-function] Update the behavior for debug intrinsics on values in the coroutine frame. This uses the previous simple dominance dbg info propagation implementation in the previous commit. I fix in the next commit the debug info for the last move/reinit in the var test. 
--- lib/IRGen/IRGenDebugInfo.cpp | 163 +++++++++++++---- .../move_function_dbginfo_async.swift | 171 ++++++++++++++++++ 2 files changed, 295 insertions(+), 39 deletions(-) create mode 100644 test/DebugInfo/move_function_dbginfo_async.swift diff --git a/lib/IRGen/IRGenDebugInfo.cpp b/lib/IRGen/IRGenDebugInfo.cpp index 1e2b0f844da32..b09b420a8f58f 100644 --- a/lib/IRGen/IRGenDebugInfo.cpp +++ b/lib/IRGen/IRGenDebugInfo.cpp @@ -15,10 +15,12 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "debug-info" + #include "IRGenDebugInfo.h" #include "GenOpaque.h" #include "GenStruct.h" #include "GenType.h" +#include "IRBuilder.h" #include "swift/AST/ASTMangler.h" #include "swift/AST/Expr.h" #include "swift/AST/GenericEnvironment.h" @@ -49,6 +51,7 @@ #include "clang/Serialization/ASTReader.h" #include "llvm/ADT/StringSet.h" #include "llvm/Config/config.h" +#include "llvm/IR/Constants.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/IntrinsicInst.h" @@ -2655,6 +2658,67 @@ void IRGenDebugInfoImpl::emitVariableDeclaration( } } +namespace { + +/// A helper struct that is used by emitDbgIntrinsic to factor redundant code. +struct DbgIntrinsicEmitter { + PointerUnion InsertPt; + irgen::IRBuilder &IRBuilder; + llvm::DIBuilder &DIBuilder; + AddrDbgInstrKind ForceDbgDeclare; + + /// Initialize the emitter and initialize the emitter to assume that it is + /// going to insert an llvm.dbg.declare or an llvm.dbg.addr either at the + /// current "generalized insertion point" of the IRBuilder. 
The "generalized + /// insertion point" is + DbgIntrinsicEmitter(irgen::IRBuilder &IRBuilder, llvm::DIBuilder &DIBuilder, + AddrDbgInstrKind ForceDebugDeclare) + : InsertPt(), IRBuilder(IRBuilder), DIBuilder(DIBuilder), + ForceDbgDeclare(ForceDebugDeclare) { + auto *ParentBB = IRBuilder.GetInsertBlock(); + auto InsertBefore = IRBuilder.GetInsertPoint(); + + if (InsertBefore != ParentBB->end()) + InsertPt = &*InsertBefore; + else + InsertPt = ParentBB; + } + + /// + + llvm::Instruction *insert(llvm::Value *Addr, llvm::DILocalVariable *VarInfo, + llvm::DIExpression *Expr, + const llvm::DILocation *DL) { + if (auto *Inst = InsertPt.dyn_cast()) { + return insert(Addr, VarInfo, Expr, DL, Inst); + } else { + return insert(Addr, VarInfo, Expr, DL, + InsertPt.get()); + } + } + + llvm::Instruction *insert(llvm::Value *Addr, llvm::DILocalVariable *VarInfo, + llvm::DIExpression *Expr, + const llvm::DILocation *DL, + llvm::Instruction *InsertBefore) { + if (ForceDbgDeclare == AddrDbgInstrKind::DbgDeclare) + return DIBuilder.insertDeclare(Addr, VarInfo, Expr, DL, InsertBefore); + return DIBuilder.insertDbgAddrIntrinsic(Addr, VarInfo, Expr, DL, + InsertBefore); + } + + llvm::Instruction *insert(llvm::Value *Addr, llvm::DILocalVariable *VarInfo, + llvm::DIExpression *Expr, + const llvm::DILocation *DL, + llvm::BasicBlock *Block) { + if (ForceDbgDeclare == AddrDbgInstrKind::DbgDeclare) + return DIBuilder.insertDeclare(Addr, VarInfo, Expr, DL, Block); + return DIBuilder.insertDbgAddrIntrinsic(Addr, VarInfo, Expr, DL, Block); + } +}; + +} // namespace + void IRGenDebugInfoImpl::emitDbgIntrinsic( IRBuilder &Builder, llvm::Value *Storage, llvm::DILocalVariable *Var, llvm::DIExpression *Expr, unsigned Line, unsigned Col, @@ -2687,73 +2751,94 @@ void IRGenDebugInfoImpl::emitDbgIntrinsic( } } - struct DbgInserter { - llvm::DIBuilder &builder; - AddrDbgInstrKind forceDbgDeclare; + auto *ParentBlock = Builder.GetInsertBlock(); - llvm::Instruction *insert(llvm::Value *Addr, 
llvm::DILocalVariable *VarInfo, - llvm::DIExpression *Expr, - const llvm::DILocation *DL, - llvm::Instruction *InsertBefore) { - if (forceDbgDeclare == AddrDbgInstrKind::DbgDeclare) - return builder.insertDeclare(Addr, VarInfo, Expr, DL, InsertBefore); - return builder.insertDbgAddrIntrinsic(Addr, VarInfo, Expr, DL, - InsertBefore); - } + // First before we do anything, check if we have an Undef. In this case, we + // /always/ emit an llvm.dbg.value of undef. + // If we have undef, always emit a llvm.dbg.value in the current position. + if (isa(Storage)) { + DBuilder.insertDbgValueIntrinsic(Storage, Var, Expr, DL, ParentBlock); + return; + } - llvm::Instruction *insert(llvm::Value *Addr, llvm::DILocalVariable *VarInfo, - llvm::DIExpression *Expr, - const llvm::DILocation *DL, - llvm::BasicBlock *Block) { - if (forceDbgDeclare == AddrDbgInstrKind::DbgDeclare) - return builder.insertDeclare(Addr, VarInfo, Expr, DL, Block); - return builder.insertDbgAddrIntrinsic(Addr, VarInfo, Expr, DL, Block); - } - }; - DbgInserter inserter{DBuilder, AddrDInstKind}; + DbgIntrinsicEmitter inserter{Builder, DBuilder, AddrDInstKind}; // If we have a single alloca... 
if (auto *Alloca = dyn_cast(Storage)) { - auto *ParentBB = Builder.GetInsertBlock(); auto InsertBefore = Builder.GetInsertPoint(); if (AddrDInstKind == AddrDbgInstrKind::DbgDeclare) { - ParentBB = Alloca->getParent(); + ParentBlock = Alloca->getParent(); InsertBefore = std::next(Alloca->getIterator()); } - if (InsertBefore != ParentBB->end()) { + if (InsertBefore != ParentBlock->end()) { inserter.insert(Alloca, Var, Expr, DL, &*InsertBefore); } else { - inserter.insert(Alloca, Var, Expr, DL, ParentBB); + inserter.insert(Alloca, Var, Expr, DL, ParentBlock); } return; } - auto *BB = Builder.GetInsertBlock(); if ((isa(Storage) && cast(Storage)->getIntrinsicID() == llvm::Intrinsic::coro_alloca_get)) { - inserter.insert(Storage, Var, Expr, DL, BB); + inserter.insert(Storage, Var, Expr, DL, ParentBlock); return; } if (InCoroContext) { - // Function arguments in async functions are emitted without a shadow copy - // (that would interfer with coroutine splitting) but with a dbg.declare to - // give CoroSplit.cpp license to emit a shadow copy for them pointing inside - // the Swift Context argument that is valid throughout the function. - auto &EntryBlock = BB->getParent()->getEntryBlock(); - if (auto *InsertBefore = &*EntryBlock.getFirstInsertionPt()) + PointerUnion InsertPt; + + // If we have a dbg.declare, we are relying on a contract with the coroutine + // splitter that in split coroutines we always create debug info for values + // in the coroutine context by creating a llvm.dbg.declare for the variable + // in the entry block of each funclet. + if (AddrDInstKind == AddrDbgInstrKind::DbgDeclare) { + // Function arguments in async functions are emitted without a shadow copy + // (that would interfer with coroutine splitting) but with a + // llvm.dbg.declare to give CoroSplit.cpp license to emit a shadow copy + // for them pointing inside the Swift Context argument that is valid + // throughout the function. 
+ auto &EntryBlock = ParentBlock->getParent()->getEntryBlock(); + if (auto *InsertBefore = &*EntryBlock.getFirstInsertionPt()) { + InsertPt = InsertBefore; + } else { + InsertPt = &EntryBlock; + } + } else { + // For llvm.dbg.addr, we just want to insert the intrinsic at the current + // insertion point. This is because our contract with the coroutine + // splitter is that the coroutine splitter just needs to emit the + // llvm.dbg.addr where we placed them. It shouldn't move them or do + // anything special with it. Instead, we have previously inserted extra + // debug_value clones previously after each instruction at the SIL level + // that corresponds with a funclet edge. This operation effectively sets + // up the rest of the pipeline to be stupid and just emit the + // llvm.dbg.addr in the correct places. This is done by the SILOptimizer + // pass DebugInfoCanonicalizer. + auto InsertBefore = Builder.GetInsertPoint(); + if (InsertBefore != ParentBlock->end()) { + InsertPt = &*InsertBefore; + } else { + InsertPt = ParentBlock; + } + } + + // Ok, we now have our insert pt. Call the appropriate operations. + assert(InsertPt); + if (auto *InsertBefore = InsertPt.dyn_cast()) { inserter.insert(Storage, Var, Expr, DL, InsertBefore); - else - inserter.insert(Storage, Var, Expr, DL, &EntryBlock); + } else { + inserter.insert(Storage, Var, Expr, DL, + InsertPt.get()); + } return; } // Insert a dbg.value at the current insertion point. if (isa(Storage) && !Var->getArg() && - BB->getFirstNonPHIOrDbg()) + ParentBlock->getFirstNonPHIOrDbg()) // SelectionDAGISel only generates debug info for a dbg.value // that is associated with a llvm::Argument if either its !DIVariable // is marked as argument or there is no non-debug intrinsic instruction @@ -2762,9 +2847,9 @@ void IRGenDebugInfoImpl::emitDbgIntrinsic( // need to make sure that dbg.value is before any non-phi / no-dbg // instruction. 
DBuilder.insertDbgValueIntrinsic(Storage, Var, Expr, DL, - BB->getFirstNonPHIOrDbg()); + ParentBlock->getFirstNonPHIOrDbg()); else - DBuilder.insertDbgValueIntrinsic(Storage, Var, Expr, DL, BB); + DBuilder.insertDbgValueIntrinsic(Storage, Var, Expr, DL, ParentBlock); } void IRGenDebugInfoImpl::emitGlobalVariableDeclaration( diff --git a/test/DebugInfo/move_function_dbginfo_async.swift b/test/DebugInfo/move_function_dbginfo_async.swift new file mode 100644 index 0000000000000..3aad3a48dca06 --- /dev/null +++ b/test/DebugInfo/move_function_dbginfo_async.swift @@ -0,0 +1,171 @@ +// RUN: %empty-directory(%t) +// RUN: %target-swift-frontend -parse-as-library -disable-availability-checking -Xllvm -sil-disable-pass=alloc-stack-hoisting -g -emit-ir -o - %s | %FileCheck %s +// RUN: %target-swift-frontend -parse-as-library -disable-availability-checking -Xllvm -sil-disable-pass=alloc-stack-hoisting -g -c %s -o %t/out.o +// RUN: %llvm-dwarfdump --show-children %t/out.o | %FileCheck -check-prefix=DWARF %s + +// This test checks that: +// +// 1. At the IR level, we insert the appropriate llvm.dbg.addr, llvm.dbg.value. +// +// 2. At the Dwarf that we have proper locations with PC validity ranges where +// the value isn't valid. + +// We only run this on macOS right now since we would need to pattern match +// slightly differently on other platforms. 
+// REQUIRES: OS=macosx +// REQUIRES: CPU=x86_64 +// REQUIRES: optimized_stdlib + +////////////////// +// Declarations // +////////////////// + +public class Klass { + public func doSomething() {} +} + +public protocol P { + static var value: P { get } + func doSomething() +} + +public var trueValue: Bool { true } +public var falseValue: Bool { false } + +public func use(_ t: T) {} +public func forceSplit() async {} + +/////////// +// Tests // +/////////// + +// CHECK-LABEL: define swifttailcc void @"$s27move_function_dbginfo_async13letSimpleTestyyxnYalF"(%swift.context* swiftasync %0, %swift.opaque* noalias %1, %swift.type* %T) +// CHECK: entry: +// CHECK: call void @llvm.dbg.addr(metadata %swift.opaque** %msg.debug, metadata ![[SIMPLE_TEST_METADATA:[0-9]+]], metadata !DIExpression(DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: musttail call swifttailcc void +// CHECK-NEXT: ret void + +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13letSimpleTestyyxnYalFTQ0_"(i8* swiftasync %0) +// CHECK: entryresume.0: +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[SIMPLE_TEST_METADATA_2:[0-9]+]], metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)), +// CHECK: musttail call swifttailcc void +// CHECK-NEXT: ret void +// +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13letSimpleTestyyxnYalFTY1_"(i8* swiftasync %0) +// CHECK: entryresume.1: +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[SIMPLE_TEST_METADATA_3:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.value(metadata %swift.opaque* undef, metadata ![[SIMPLE_TEST_METADATA_3]], metadata !DIExpression(DW_OP_deref)), !dbg ![[ADDR_LOC]] +// CHECK: musttail call swifttailcc void +// CHECK-NEXT: ret void + +// DWARF: DW_AT_linkage_name ("$s3out13letSimpleTestyyxnYalF") +// DWARF: 
DW_TAG_formal_parameter +// Disable this part of the test due to a different codegen bug. +// XWARF-NEXT: DW_AT_location (0x{{[a-f0-9]+}}: +// XWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_breg0 RAX+0, DW_OP_deref) +// XWARF-NEXT: DW_AT_name ("msg") +// +// DWARF: DW_AT_linkage_name ("$s3out13letSimpleTestyyxnYalFTQ0_") +// DWARF: DW_AT_name ("letSimpleTest") +// DWARF: DW_TAG_formal_parameter +// DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_deref, DW_OP_plus_uconst 0x[[MSG_LOC:[a-f0-9]+]], DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") +// +// DWARF: DW_AT_linkage_name ("$s3out13letSimpleTestyyxnYalFTY1_") +// DWARF: DW_AT_name ("letSimpleTest") +// DWARF: DW_TAG_formal_parameter +// DWARF: DW_AT_location (0x{{[a-f0-9]+}}: +// DWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") +public func letSimpleTest(_ msg: __owned T) async { + await forceSplit() + use(_move(msg)) +} + +// CHECK-LABEL: define swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalF"(%swift.context* swiftasync %0, %swift.opaque* %1, %swift.opaque* noalias %2, %swift.type* %T) +// CHECK: call void @llvm.dbg.addr(metadata %swift.opaque** %msg.debug, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_deref)) +// CHECK: musttail call swifttailcc void @"$s27move_function_dbginfo_async10forceSplityyYaF"(%swift.context* swiftasync %{{[0-9]+}}) +// CHECK-NEXT: ret void +// CHECK-NEXT: } +// +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTQ0_"(i8* swiftasync %0) +// CHECK: entryresume.0: +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)) +// CHECK: musttail call swifttailcc void @swift_task_switch(%swift.context* 
swiftasync %9, i8* bitcast (void (i8*)* @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTY1_" to i8*), i64 0, i64 0) +// CHECK-NEXT: ret void +// CHECK-NEXT: } +// +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTY1_"(i8* swiftasync %0) +// CHECK: entryresume.1: +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[METADATA:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.value(metadata %swift.opaque* undef, metadata ![[METADATA]], metadata !DIExpression(DW_OP_deref)), !dbg ![[ADDR_LOC]] +// CHECK: musttail call swifttailcc void @"$s27move_function_dbginfo_async10forceSplityyYaF"(%swift.context* swiftasync %34) +// CHECK-NEXT: ret void +// CHECK-NEXT: } +// +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTQ2_"(i8* swiftasync %0) +// CHECK: entryresume.2: + +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTY3_"(i8* swiftasync %0) +// CHECK: entryresume.3: +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)) +// +// DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalF") +// DWARF: DW_AT_name ("varSimpleTest") +// DWARF: DW_TAG_formal_parameter +// Disable this part of the test due to an additional error. 
+// XWARF-NEXT: DW_AT_location (0x{{[a-f0-9]+}}: +// XWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_breg2 RCX+0, DW_OP_deref) +// XWARF-NEXT: DW_AT_name ("msg") +// +// DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTQ0_") +// DWARF: DW_AT_name ("varSimpleTest") +// DWARF: DW_TAG_formal_parameter +// DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_deref, DW_OP_plus_uconst 0x[[MSG_LOC:[a-f0-9]+]], DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") +// +// DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTY1_") +// DWARF: DW_AT_name ("varSimpleTest") +// DWARF: DW_TAG_formal_parameter +// DWARF-NEXT: DW_AT_location (0x{{[a-f0-9]+}}: +// DWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") +// +// TODO: Missing debug info in s3out13varSimpleTestyyxz_xtYalFTQ2_ +// DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTQ2_") +// DWARF: DW_AT_name ("varSimpleTest") +// +// We perform moves in this funclet so we at first have an entry_value value +// that is moved and then we use a normal register. 
+// +// DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTY3_") +// DWARF: DW_AT_name ("varSimpleTest") +// DWARF: DW_TAG_formal_parameter +// DWARF: DW_AT_location (0x{{[a-f0-9]+}}: +// DWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): +// DWARF-SAME: DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref +// DWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}) +// DWARF-SAME: DW_OP_breg{{.*}}, DW_OP_deref, DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") +// +// DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTQ4_") +// DWARF: DW_AT_name ("varSimpleTest") +// DWARF: DW_TAG_formal_parameter +// DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_deref, DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") +// +// DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTY5_") +// DWARF: DW_AT_name ("varSimpleTest") +// DWARF: DW_TAG_formal_parameter +// DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") +public func varSimpleTest(_ msg: inout T, _ msg2: T) async { + await forceSplit() + use(_move(msg)) + await forceSplit() + msg = msg2 + let msg3 = _move(msg) + let _ = msg3 + msg = msg2 + await forceSplit() +} From 9da5a4b91fde8e60bd42205b40b46964fafcd4fd Mon Sep 17 00:00:00 2001 From: Michael Gottesman Date: Thu, 3 Mar 2022 13:18:07 -0800 Subject: [PATCH 33/88] [move-function-addr] Fix a subtle bug where we were not inserting debug info in the case of vars. The specific problem was that we assume that SILGen will only emit a single debug_value for a value. This condition breaks once we start processing since we are inserting extra debug_value when reiniting. The end result is that we do not insert the undef, addr. 
To fix this I just hoisted out this computation upon the address before we do anything. --- .../MoveKillsCopyableAddressesChecker.cpp | 101 ++++++++++-------- .../move_function_dbginfo_async.swift | 16 ++- 2 files changed, 70 insertions(+), 47 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/MoveKillsCopyableAddressesChecker.cpp b/lib/SILOptimizer/Mandatory/MoveKillsCopyableAddressesChecker.cpp index 112e8c45d1308..c45d8059707ff 100644 --- a/lib/SILOptimizer/Mandatory/MoveKillsCopyableAddressesChecker.cpp +++ b/lib/SILOptimizer/Mandatory/MoveKillsCopyableAddressesChecker.cpp @@ -1399,13 +1399,14 @@ struct DataflowState { closureConsumes(closureConsumes) {} void init(); bool process( - SILValue address, + SILValue address, DebugVarCarryingInst addressDebugInst, SmallBlotSetVector &postDominatingConsumingUsers); bool handleSingleBlockClosure(SILArgument *address, ClosureOperandState &state); bool cleanupAllDestroyAddr( - SILValue address, SILFunction *fn, SmallBitVector &destroyIndices, - SmallBitVector &reinitIndices, SmallBitVector &consumingClosureIndices, + SILValue address, DebugVarCarryingInst addressDebugInst, SILFunction *fn, + SmallBitVector &destroyIndices, SmallBitVector &reinitIndices, + SmallBitVector &consumingClosureIndices, BasicBlockSet &blocksVisitedWhenProcessingNewTakes, BasicBlockSet &blocksWithMovesThatAreNowTakes, SmallBlotSetVector &postDominatingConsumingUsers); @@ -1423,16 +1424,15 @@ struct DataflowState { } // namespace bool DataflowState::cleanupAllDestroyAddr( - SILValue address, SILFunction *fn, SmallBitVector &destroyIndices, - SmallBitVector &reinitIndices, SmallBitVector &consumingClosureIndices, + SILValue address, DebugVarCarryingInst addressDebugInst, SILFunction *fn, + SmallBitVector &destroyIndices, SmallBitVector &reinitIndices, + SmallBitVector &consumingClosureIndices, BasicBlockSet &blocksVisitedWhenProcessingNewTakes, BasicBlockSet &blocksWithMovesThatAreNowTakes, SmallBlotSetVector &postDominatingConsumingUsers) { bool 
madeChange = false; BasicBlockWorklist worklist(fn); - auto debugVarInst = DebugVarCarryingInst::getFromValue(address); - LLVM_DEBUG(llvm::dbgs() << "Cleanup up destroy addr!\n"); LLVM_DEBUG(llvm::dbgs() << " Visiting destroys!\n"); LLVM_DEBUG(llvm::dbgs() << " Destroy Indices: " << destroyIndices << "\n"); @@ -1537,12 +1537,13 @@ bool DataflowState::cleanupAllDestroyAddr( convertMemoryReinitToInitForm(*reinit); // Make sure to create a new debug_value for the reinit value. - if (debugVarInst) { - if (auto varInfo = debugVarInst.getVarInfo()) { + if (addressDebugInst) { + if (auto varInfo = addressDebugInst.getVarInfo()) { SILBuilderWithScope reinitBuilder(*reinit); - reinitBuilder.setCurrentDebugScope(debugVarInst->getDebugScope()); - reinitBuilder.createDebugValue(debugVarInst.inst->getLoc(), address, - *varInfo, false, /*was moved*/ true); + reinitBuilder.setCurrentDebugScope(addressDebugInst->getDebugScope()); + reinitBuilder.createDebugValue( + addressDebugInst.inst->getLoc(), address, *varInfo, false, + /*was moved*/ true); } } madeChange = true; @@ -1583,7 +1584,7 @@ bool DataflowState::cleanupAllDestroyAddr( } bool DataflowState::process( - SILValue address, + SILValue address, DebugVarCarryingInst addressDebugInst, SmallBlotSetVector &postDominatingConsumingUsers) { SILFunction *fn = address->getFunction(); assert(fn); @@ -1612,6 +1613,7 @@ bool DataflowState::process( BasicBlockSet blocksVisitedWhenProcessingNewTakes(fn); BasicBlockSet blocksWithMovesThatAreNowTakes(fn); bool convertedMarkMoveToTake = false; + for (auto *mvi : markMovesThatPropagateDownwards) { bool emittedSingleDiagnostic = false; @@ -1777,13 +1779,13 @@ bool DataflowState::process( // Now that we have processed all of our mark_moves, eliminate all of the // destroy_addr and set our debug value as being moved. 
- if (auto debug = DebugVarCarryingInst::getFromValue(address)) { - debug.markAsMoved(); - if (auto varInfo = debug.getVarInfo()) { + if (addressDebugInst) { + addressDebugInst.markAsMoved(); + if (auto varInfo = addressDebugInst.getVarInfo()) { SILBuilderWithScope undefBuilder(builder); - undefBuilder.setCurrentDebugScope(debug->getDebugScope()); + undefBuilder.setCurrentDebugScope(addressDebugInst->getDebugScope()); undefBuilder.createDebugValue( - debug->getLoc(), + addressDebugInst->getLoc(), SILUndef::get(address->getType(), builder.getModule()), *varInfo, false /*poison*/, true /*was moved*/); } @@ -1806,8 +1808,8 @@ bool DataflowState::process( // Now that we have processed all of our mark_moves, eliminate all of the // destroy_addr. madeChange |= cleanupAllDestroyAddr( - address, fn, getIndicesOfPairedDestroys(), getIndicesOfPairedReinits(), - getIndicesOfPairedConsumingClosureUses(), + address, addressDebugInst, fn, getIndicesOfPairedDestroys(), + getIndicesOfPairedReinits(), getIndicesOfPairedConsumingClosureUses(), blocksVisitedWhenProcessingNewTakes, blocksWithMovesThatAreNowTakes, postDominatingConsumingUsers); @@ -1932,6 +1934,7 @@ struct MoveKillsCopyableAddressesChecker { void emitDiagnosticForMove(SILValue borrowedValue, StringRef borrowedValueName, MoveValueInst *mvi); bool performSingleBasicBlockAnalysis(SILValue address, + DebugVarCarryingInst addressDebugInst, MarkUnresolvedMoveAddrInst *mvi); ASTContext &getASTContext() const { return fn->getASTContext(); } @@ -2049,7 +2052,8 @@ bool MoveKillsCopyableAddressesChecker::performClosureDataflow( // case. Returns false if we visited all of the uses and seeded the UseState // struct with the information needed to perform our interprocedural dataflow. 
bool MoveKillsCopyableAddressesChecker::performSingleBasicBlockAnalysis( - SILValue address, MarkUnresolvedMoveAddrInst *mvi) { + SILValue address, DebugVarCarryingInst addressDebugInst, + MarkUnresolvedMoveAddrInst *mvi) { // First scan downwards to make sure we are move out of this block. auto &useState = dataflowState.useState; auto &applySiteToPromotedArgIndices = @@ -2075,17 +2079,17 @@ bool MoveKillsCopyableAddressesChecker::performSingleBasicBlockAnalysis( builder.createCopyAddr(mvi->getLoc(), mvi->getSrc(), mvi->getDest(), IsTake, IsInitialization); // Also, mark the alloc_stack as being moved at some point. - if (auto debug = DebugVarCarryingInst::getFromValue(address)) { - if (auto varInfo = debug.getVarInfo()) { + if (addressDebugInst) { + if (auto varInfo = addressDebugInst.getVarInfo()) { SILBuilderWithScope undefBuilder(builder); - undefBuilder.setCurrentDebugScope(debug->getDebugScope()); + undefBuilder.setCurrentDebugScope(addressDebugInst->getDebugScope()); undefBuilder.createDebugValue( - debug->getLoc(), + addressDebugInst->getLoc(), SILUndef::get(address->getType(), builder.getModule()), *varInfo, false, /*was moved*/ true); } - debug.markAsMoved(); + addressDebugInst.markAsMoved(); } useState.destroys.erase(dvi); @@ -2185,29 +2189,29 @@ bool MoveKillsCopyableAddressesChecker::performSingleBasicBlockAnalysis( SILBuilderWithScope builder(mvi); builder.createCopyAddr(mvi->getLoc(), mvi->getSrc(), mvi->getDest(), IsTake, IsInitialization); - if (auto debug = DebugVarCarryingInst::getFromValue(address)) { - if (auto varInfo = debug.getVarInfo()) { + if (addressDebugInst) { + if (auto varInfo = addressDebugInst.getVarInfo()) { { SILBuilderWithScope undefBuilder(builder); - undefBuilder.setCurrentDebugScope(debug->getDebugScope()); + undefBuilder.setCurrentDebugScope(addressDebugInst->getDebugScope()); undefBuilder.createDebugValue( - debug->getLoc(), + addressDebugInst->getLoc(), SILUndef::get(address->getType(), builder.getModule()), *varInfo, 
false, - /*was moved*/ true); + /*was moved*/ true); } { // Make sure at the reinit point to create a new debug value after the // reinit instruction so we reshow the variable. auto *next = interestingUser->getNextInstruction(); SILBuilderWithScope reinitBuilder(next); - reinitBuilder.setCurrentDebugScope(debug->getDebugScope()); - reinitBuilder.createDebugValue(debug->getLoc(), address, *varInfo, - false, + reinitBuilder.setCurrentDebugScope(addressDebugInst->getDebugScope()); + reinitBuilder.createDebugValue(addressDebugInst->getLoc(), + address, *varInfo, false, /*was moved*/ true); } } - debug.markAsMoved(); + addressDebugInst.markAsMoved(); } mvi->eraseFromParent(); return false; @@ -2238,17 +2242,17 @@ bool MoveKillsCopyableAddressesChecker::performSingleBasicBlockAnalysis( LLVM_DEBUG(llvm::dbgs() << "Found apply site to clone: " << **fas); LLVM_DEBUG(llvm::dbgs() << "BitVector: "; dumpBitVector(llvm::dbgs(), bitVector); llvm::dbgs() << '\n'); - if (auto debug = DebugVarCarryingInst::getFromValue(address)) { - if (auto varInfo = debug.getVarInfo()) { + if (addressDebugInst) { + if (auto varInfo = addressDebugInst.getVarInfo()) { SILBuilderWithScope undefBuilder(builder); - undefBuilder.setCurrentDebugScope(debug->getDebugScope()); + undefBuilder.setCurrentDebugScope(addressDebugInst->getDebugScope()); undefBuilder.createDebugValue( - debug->getLoc(), + addressDebugInst->getLoc(), SILUndef::get(address->getType(), builder.getModule()), *varInfo, false, /*was moved*/ true); } - debug.markAsMoved(); + addressDebugInst.markAsMoved(); } mvi->eraseFromParent(); return false; @@ -2337,9 +2341,21 @@ bool MoveKillsCopyableAddressesChecker::check(SILValue address) { // routine also prepares the pass for running the multi-basic block // diagnostic. bool emittedSingleBBDiagnostic = false; + + // Before we process any moves, gather the debug inst associated with our + // address. 
+ // + // NOTE: The reason why we do this early is that we rely on our address + // initially having a single DebugValueCarryingInst (either an alloc_stack + // itself or a debug_value associated with an argument). If we do this while + // processing, as we insert additional debug info we will cause this condition + // to begin failing. + auto addressDebugInst = DebugVarCarryingInst::getFromValue(address); + for (auto *mvi : useState.markMoves) { LLVM_DEBUG(llvm::dbgs() << "Performing single block analysis on: " << *mvi); - emittedSingleBBDiagnostic |= performSingleBasicBlockAnalysis(address, mvi); + emittedSingleBBDiagnostic |= + performSingleBasicBlockAnalysis(address, addressDebugInst, mvi); } if (emittedSingleBBDiagnostic) { @@ -2359,7 +2375,8 @@ bool MoveKillsCopyableAddressesChecker::check(SILValue address) { // Ok, we need to perform global dataflow for one of our moves. Initialize our // dataflow state engine and then run the dataflow itself. dataflowState.init(); - bool result = dataflowState.process(address, closureConsumes); + bool result = dataflowState.process( + address, addressDebugInst, closureConsumes); return result; } diff --git a/test/DebugInfo/move_function_dbginfo_async.swift b/test/DebugInfo/move_function_dbginfo_async.swift index 3aad3a48dca06..aed16a980b318 100644 --- a/test/DebugInfo/move_function_dbginfo_async.swift +++ b/test/DebugInfo/move_function_dbginfo_async.swift @@ -47,13 +47,13 @@ public func forceSplit() async {} // CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13letSimpleTestyyxnYalFTQ0_"(i8* swiftasync %0) // CHECK: entryresume.0: -// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[SIMPLE_TEST_METADATA_2:[0-9]+]], metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)), +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[SIMPLE_TEST_METADATA_2:[0-9]+]], metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 
8, DW_OP_deref)), // CHECK: musttail call swifttailcc void // CHECK-NEXT: ret void // // CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13letSimpleTestyyxnYalFTY1_"(i8* swiftasync %0) // CHECK: entryresume.1: -// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[SIMPLE_TEST_METADATA_3:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[SIMPLE_TEST_METADATA_3:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] // CHECK: call void @llvm.dbg.value(metadata %swift.opaque* undef, metadata ![[SIMPLE_TEST_METADATA_3]], metadata !DIExpression(DW_OP_deref)), !dbg ![[ADDR_LOC]] // CHECK: musttail call swifttailcc void // CHECK-NEXT: ret void @@ -90,14 +90,14 @@ public func letSimpleTest(_ msg: __owned T) async { // // CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTQ0_"(i8* swiftasync %0) // CHECK: entryresume.0: -// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)) +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8, DW_OP_deref)) // CHECK: musttail call swifttailcc void @swift_task_switch(%swift.context* swiftasync %9, i8* bitcast (void (i8*)* @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTY1_" to i8*), i64 0, i64 0) // CHECK-NEXT: ret void // CHECK-NEXT: } // // CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTY1_"(i8* swiftasync %0) // CHECK: entryresume.1: -// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[METADATA:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 24, 
DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[METADATA:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] // CHECK: call void @llvm.dbg.value(metadata %swift.opaque* undef, metadata ![[METADATA]], metadata !DIExpression(DW_OP_deref)), !dbg ![[ADDR_LOC]] // CHECK: musttail call swifttailcc void @"$s27move_function_dbginfo_async10forceSplityyYaF"(%swift.context* swiftasync %34) // CHECK-NEXT: ret void @@ -108,7 +108,12 @@ public func letSimpleTest(_ msg: __owned T) async { // CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalFTY3_"(i8* swiftasync %0) // CHECK: entryresume.3: -// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_plus_uconst, 24, DW_OP_plus_uconst, 8, DW_OP_deref)) +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[METADATA:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.value(metadata %swift.opaque* undef, metadata ![[METADATA]], metadata !DIExpression(DW_OP_deref)), !dbg ![[ADDR_LOC]] +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[METADATA]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC]] +// CHECK: musttail call swifttailcc void @"$s27move_function_dbginfo_async10forceSplityyYaF"( +// CHECK-NEXT: ret void +// CHECK-NEXT: } // // DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalF") // DWARF: DW_AT_name ("varSimpleTest") @@ -159,6 +164,7 @@ public func letSimpleTest(_ msg: __owned T) async { // DWARF: DW_TAG_formal_parameter // DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) // DWARF-NEXT: DW_AT_name ("msg") + public func 
varSimpleTest(_ msg: inout T, _ msg2: T) async { await forceSplit() use(_move(msg)) From 7091c87f6751abe97d0db39d03347e038666007d Mon Sep 17 00:00:00 2001 From: Michael Gottesman Date: Sat, 12 Mar 2022 18:23:08 -0800 Subject: [PATCH 34/88] [move-function] Update a test now that I have merged in the better debug info salvaging while coroutine splitting into LLVM. Thanks again to Adrian Prantl for his help with this work! --- .../move_function_dbginfo_async.swift | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/test/DebugInfo/move_function_dbginfo_async.swift b/test/DebugInfo/move_function_dbginfo_async.swift index aed16a980b318..54bd99b6d5994 100644 --- a/test/DebugInfo/move_function_dbginfo_async.swift +++ b/test/DebugInfo/move_function_dbginfo_async.swift @@ -41,7 +41,7 @@ public func forceSplit() async {} // CHECK-LABEL: define swifttailcc void @"$s27move_function_dbginfo_async13letSimpleTestyyxnYalF"(%swift.context* swiftasync %0, %swift.opaque* noalias %1, %swift.type* %T) // CHECK: entry: -// CHECK: call void @llvm.dbg.addr(metadata %swift.opaque** %msg.debug, metadata ![[SIMPLE_TEST_METADATA:[0-9]+]], metadata !DIExpression(DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.addr(metadata %swift.context* %0, metadata ![[SIMPLE_TEST_METADATA:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8, DW_OP_deref)), !dbg ![[ADDR_LOC:[0-9]+]] // CHECK: musttail call swifttailcc void // CHECK-NEXT: ret void @@ -60,10 +60,8 @@ public func forceSplit() async {} // DWARF: DW_AT_linkage_name ("$s3out13letSimpleTestyyxnYalF") // DWARF: DW_TAG_formal_parameter -// Disable this part of the test due to a different codegen bug. 
-// XWARF-NEXT: DW_AT_location (0x{{[a-f0-9]+}}: -// XWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_breg0 RAX+0, DW_OP_deref) -// XWARF-NEXT: DW_AT_name ("msg") +// DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x10, DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") // // DWARF: DW_AT_linkage_name ("$s3out13letSimpleTestyyxnYalFTQ0_") // DWARF: DW_AT_name ("letSimpleTest") @@ -83,7 +81,7 @@ public func letSimpleTest(_ msg: __owned T) async { } // CHECK-LABEL: define swifttailcc void @"$s27move_function_dbginfo_async13varSimpleTestyyxz_xtYalF"(%swift.context* swiftasync %0, %swift.opaque* %1, %swift.opaque* noalias %2, %swift.type* %T) -// CHECK: call void @llvm.dbg.addr(metadata %swift.opaque** %msg.debug, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_deref)) +// CHECK: call void @llvm.dbg.addr(metadata %swift.context* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8, DW_OP_deref)) // CHECK: musttail call swifttailcc void @"$s27move_function_dbginfo_async10forceSplityyYaF"(%swift.context* swiftasync %{{[0-9]+}}) // CHECK-NEXT: ret void // CHECK-NEXT: } @@ -118,10 +116,8 @@ public func letSimpleTest(_ msg: __owned T) async { // DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalF") // DWARF: DW_AT_name ("varSimpleTest") // DWARF: DW_TAG_formal_parameter -// Disable this part of the test due to an additional error. 
-// XWARF-NEXT: DW_AT_location (0x{{[a-f0-9]+}}: -// XWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_breg2 RCX+0, DW_OP_deref) -// XWARF-NEXT: DW_AT_name ("msg") +// DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x10, DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF-NEXT: DW_AT_name ("msg") // // DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTQ0_") // DWARF: DW_AT_name ("varSimpleTest") @@ -162,8 +158,10 @@ public func letSimpleTest(_ msg: __owned T) async { // DWARF: DW_AT_linkage_name ("$s3out13varSimpleTestyyxz_xtYalFTY5_") // DWARF: DW_AT_name ("varSimpleTest") // DWARF: DW_TAG_formal_parameter -// DWARF-NEXT: DW_AT_location (DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) -// DWARF-NEXT: DW_AT_name ("msg") +// DWARF: DW_AT_location (0x{{[a-f0-9]+}}: +// DWARF: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x10, DW_OP_plus_uconst 0x8, DW_OP_deref +// DWARF: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_breg6 RBP-88, DW_OP_deref, DW_OP_plus_uconst 0x10, DW_OP_plus_uconst 0x8, DW_OP_deref) +// DWARF: DW_AT_name ("msg") public func varSimpleTest(_ msg: inout T, _ msg2: T) async { await forceSplit() From 50a0b915604dae05b70c7d89a14f688aaf397ba8 Mon Sep 17 00:00:00 2001 From: Michael Gottesman Date: Sat, 12 Mar 2022 21:23:25 -0800 Subject: [PATCH 35/88] [move-function] Propagate debug_value undef rather than treat it as an invalidation. The reason that I am doing this is that otherwise in funclets we may show an invalidated value being available if we have an llvm.dbg.addr for a reinit value later in the funclet. This ensures that we have the undef at the beginning of the funclet so before that llvm.dbg.addr we will properly show no value. 
--- lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp | 10 ---------- test/DebugInfo/move_function_dbginfo_async.swift | 2 ++ test/SILOptimizer/debuginfo_canonicalizer.sil | 1 + 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp b/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp index 8fdc0fcbd227b..8375fb7e7674e 100644 --- a/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp +++ b/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp @@ -214,16 +214,6 @@ bool DebugInfoCanonicalizer::process() { continue; } - // If we have a SILUndef, mark this debug info as being mapped to - // nullptr. - if (isa(dvi->getOperand())) { - LLVM_DEBUG(llvm::dbgs() << " SILUndef.\n"); - auto iter = state.debugValues.insert({*debugInfo, nullptr}); - if (!iter.second) - iter.first->second = nullptr; - continue; - } - // Otherwise, we may have a new debug_value to track. Try to begin // tracking it... auto iter = state.debugValues.insert({*debugInfo, dvi}); diff --git a/test/DebugInfo/move_function_dbginfo_async.swift b/test/DebugInfo/move_function_dbginfo_async.swift index 54bd99b6d5994..52b5fb0ad5244 100644 --- a/test/DebugInfo/move_function_dbginfo_async.swift +++ b/test/DebugInfo/move_function_dbginfo_async.swift @@ -146,6 +146,8 @@ public func letSimpleTest(_ msg: __owned T) async { // DWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): // DWARF-SAME: DW_OP_entry_value(DW_OP_reg14 R14), DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref // DWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}) +// DWARF-SAME: DW_OP_breg{{.*}}, DW_OP_deref, DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref +// DWARF-NEXT: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}) // DWARF-SAME: DW_OP_breg{{.*}}, DW_OP_deref, DW_OP_plus_uconst 0x[[MSG_LOC]], DW_OP_plus_uconst 0x8, DW_OP_deref) // DWARF-NEXT: DW_AT_name ("msg") // diff --git a/test/SILOptimizer/debuginfo_canonicalizer.sil 
b/test/SILOptimizer/debuginfo_canonicalizer.sil index 06a59faa104ff..2f4a39923d2ed 100644 --- a/test/SILOptimizer/debuginfo_canonicalizer.sil +++ b/test/SILOptimizer/debuginfo_canonicalizer.sil @@ -113,6 +113,7 @@ bb3: // CHECK: [[BB_RHS]]: // CHECK-NEXT: debug_value [moved] undef // CHECK-NEXT: abort_apply +// CHECK-NEXT: debug_value [moved] undef // CHECK-NEXT: br [[BB_CONT]] // CHECK: } // end sil function 'testUndefDiamond' sil @testUndefDiamond : $@convention(thin) (@guaranteed Klass) -> () { From b6c2b3edff2af715d304bac43faab8f0d4783bdd Mon Sep 17 00:00:00 2001 From: Pavel Yaskevich Date: Mon, 21 Mar 2022 14:55:36 -0700 Subject: [PATCH 36/88] [Diagnostics] Fix out-of-bounds index while fixing argument mismatch Although the overload choice has two parameters, it doesn't mean that there are exactly two arguments passed to it. Resolves: rdar://87407899 --- lib/Sema/CSSimplify.cpp | 6 ++++ .../Sema/SwiftUI/rdar87407899.swift | 33 +++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 validation-test/Sema/SwiftUI/rdar87407899.swift diff --git a/lib/Sema/CSSimplify.cpp b/lib/Sema/CSSimplify.cpp index c7b1018c6c4df..3d8c213b848c7 100644 --- a/lib/Sema/CSSimplify.cpp +++ b/lib/Sema/CSSimplify.cpp @@ -4258,6 +4258,12 @@ static bool repairOutOfOrderArgumentsInBinaryFunction( auto currArgIdx = locator->castLastElementTo().getArgIdx(); + + // Argument is extraneous and has been re-ordered to match one + // of two parameter types. + if (currArgIdx >= 2) + return false; + auto otherArgIdx = currArgIdx == 0 ? 
1 : 0; auto argType = cs.getType(argument); diff --git a/validation-test/Sema/SwiftUI/rdar87407899.swift b/validation-test/Sema/SwiftUI/rdar87407899.swift new file mode 100644 index 0000000000000..be81afd4b8b72 --- /dev/null +++ b/validation-test/Sema/SwiftUI/rdar87407899.swift @@ -0,0 +1,33 @@ +// RUN: %target-typecheck-verify-swift -target %target-cpu-apple-macosx10.15 -swift-version 5 +// REQUIRES: objc_interop +// REQUIRES: OS=macosx + +import SwiftUI + +struct AStruct { + let aField: MyEnum = .aCase // expected-note {{change 'let' to 'var' to make it mutable}} +} + +enum MyEnum { +case aCase +} + +extension EmptyView { + func doImport(showImport: Binding, anEnum: MyEnum) -> some View { + EmptyView() + } +} + +struct SegFaultingView: View { + @Binding var aStruct: AStruct + @Binding var showImport: Bool + @State var importMessage: String = "none" + + var body: some View { + EmptyView() + .doImport(showImport: showImport, // expected-error {{cannot convert value 'showImport' of type 'Bool' to expected type 'Binding', use wrapper instead}} + importMessage: importMessage, // expected-error {{extra argument 'importMessage' in call}} + anEnum: $aStruct.aField) // expected-error {{cannot convert value of type 'Binding' to expected argument type 'MyEnum'}} + // expected-error@-1 {{cannot assign to property: 'aField' is a 'let' constant}} + } +} From 81fc0aed87abf060305775693442ed469e4d60d4 Mon Sep 17 00:00:00 2001 From: Allan Shortlidge Date: Fri, 18 Mar 2022 15:20:26 -0700 Subject: [PATCH 37/88] Frontend: By default, assume `-target-min-inlining-version min` for modules compiled with `-library-level api` to catch availability issues in API swiftinterfaces. 
Resolves rdar://90575987 --- lib/Frontend/CompilerInvocation.cpp | 16 +++++++--------- test/attr/attr_inlinable_available.swift | 18 +++++++++++++++--- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp index 8f6978be44907..eb26858fcfbb2 100644 --- a/lib/Frontend/CompilerInvocation.cpp +++ b/lib/Frontend/CompilerInvocation.cpp @@ -805,17 +805,15 @@ static bool ParseLangArgs(LangOptions &Opts, ArgList &Args, // First, set up default minimum inlining target versions. auto getDefaultMinimumInliningTargetVersion = [&](const llvm::Triple &triple) -> llvm::VersionTuple { -#if SWIFT_DEFAULT_TARGET_MIN_INLINING_VERSION_TO_MIN - // In ABI-stable modules, default to the version when Swift first became - // available. - if (FrontendOpts.EnableLibraryEvolution) + // In API modules, default to the version when Swift first became available. + if (Opts.LibraryLevel == LibraryLevel::API) if (auto minTriple = minimumAvailableOSVersionForTriple(triple)) - return minTriple; -#endif + return *minTriple; - // In ABI-unstable modules, we will never have to interoperate with - // older versions of the module, so we should default to the minimum - // deployment target. + // In other modules, assume that availability is used less consistently + // and that library clients will generally raise deployment targets as the + // library evolves so the min inlining version should be the deployment + // target by default. 
unsigned major, minor, patch; if (triple.isMacOSX()) triple.getMacOSXVersion(major, minor, patch); diff --git a/test/attr/attr_inlinable_available.swift b/test/attr/attr_inlinable_available.swift index 9126954b1c8da..96451e794657d 100644 --- a/test/attr/attr_inlinable_available.swift +++ b/test/attr/attr_inlinable_available.swift @@ -16,17 +16,29 @@ // RUN: %target-typecheck-verify-swift -swift-version 5 -enable-library-evolution -target %target-next-stable-abi-triple -target-min-inlining-version min +// Check that `-library-level api` implies `-target-min-inlining-version min` +// RUN: %target-typecheck-verify-swift -swift-version 5 -enable-library-evolution -target %target-next-stable-abi-triple -library-level api + + // Check that these rules are only applied when requested and that at least some // diagnostics are not present without it. -// RUN: not %target-typecheck-verify-swift -swift-version 5 -target %target-next-stable-abi-triple 2>&1 | %FileCheck --check-prefix NON_ABI %s -// NON_ABI: error: expected error not produced -// NON_ABI: {'BetweenTargets' is only available in} +// RUN: not %target-typecheck-verify-swift -swift-version 5 -target %target-next-stable-abi-triple 2>&1 | %FileCheck --check-prefix NON_MIN %s + + +// Check that -target-min-inlining-version overrides -library-level, allowing +// library owners to disable this behavior for API libraries if needed. +// RUN: not %target-typecheck-verify-swift -swift-version 5 -target %target-next-stable-abi-triple -target-min-inlining-version target -library-level api 2>&1 | %FileCheck --check-prefix NON_MIN %s // Check that we respect -target-min-inlining-version by cranking it up high // enough to suppress any possible errors. 
// RUN: %target-swift-frontend -typecheck -disable-objc-attr-requires-foundation-module %s -swift-version 5 -enable-library-evolution -target %target-next-stable-abi-triple -target-min-inlining-version 42.0 + +// NON_MIN: error: expected error not produced +// NON_MIN: {'BetweenTargets' is only available in} + + /// Declaration with no availability annotation. Should be inferred as minimum /// inlining target. public struct NoAvailable { From 1717935c7bcba77aaaf27318adcc5fe9de58ea23 Mon Sep 17 00:00:00 2001 From: Becca Royal-Gordon Date: Mon, 21 Mar 2022 15:13:29 -0700 Subject: [PATCH 38/88] Add diagnostic verifier features to Diagnostics.md We've made changes to the diagnostic verifier over time that we failed to actually document. Update docs/Diagnostics.md to describe: * Fix-it alternation (cdcd726f92bb8e9f8b3d1446f3ea50a3bfdcde05) * Fix-it line offsets (2128678d56c35fe6bb1d1df3f60b19cd4ade4edc) * Additional files (5036a55550d3dbdc755f9f09e85f2f351f12d56c) * Ignoring `` diagnostics (384ab780e9f1dfa92730abca0fbaa9ae86160f75) I've also slightly fleshed out the description of fix-it expectations. --- docs/Diagnostics.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/Diagnostics.md b/docs/Diagnostics.md index 87137fdcf5680..8dd20fabd7e9c 100644 --- a/docs/Diagnostics.md +++ b/docs/Diagnostics.md @@ -141,6 +141,8 @@ If you run into any issues or have questions while following the steps above, fe (This section is specific to the Swift compiler's diagnostic engine.) If the `-verify` frontend flag is used, the Swift compiler will check emitted diagnostics against specially formatted comments in the source. This feature is used extensively throughout the test suite to ensure diagnostics are emitted with the correct message and source location. + +`-verify` parses all ordinary source files passed as inputs to the compiler to look for expectation comments. 
If you'd like to check for diagnostics in additional files, like swiftinterfaces or even Objective-C headers, specify them with `-verify-additional-file `. By default, `-verify` considers any diagnostic at `:0` (that is, any diagnostic emitted with an invalid source location) to be unexpected; you can disable this by passing `-verify-ignore-unknown`. An expected diagnostic is denoted by a comment which begins with `expected-error`, `expected-warning`, `expected-note`, or `expected-remark`. It is followed by: @@ -150,6 +152,14 @@ An expected diagnostic is denoted by a comment which begins with `expected-error - (Required) The expected error message. The message should be enclosed in double curly braces and should not include the `error:`/`warning:`/`note:`/`remark:` prefix. For example, `// expected-error {{invalid redeclaration of 'y'}}` would match an error with that message on the same line. The expected message does not need to match the emitted message verbatim. As long as the expected message is a substring of the original message, they will match. -- (Optional) Expected fix-its. These are each enclosed in double curly braces and appear after the expected message. An expected fix-it consists of a column range followed by the text it's expected to be replaced with. For example, `let r : Int i = j // expected-error{{consecutive statements}} {{12-12=;}}` will match a fix-it attached to the consecutive statements error which inserts a semicolon at column 12, just after the 't' in 'Int'. The special {{none}} specifier is also supported, which will cause the diagnostic match to fail if unexpected fix-its are produced. +- (Optional) Expected fix-its. These are each enclosed in double curly braces and appear after the expected message. An expected fix-it consists of a column range followed by the text it's expected to be replaced with. 
For example, `let r : Int i = j // expected-error{{consecutive statements}} {{12-12=;}}` will match a fix-it attached to the consecutive statements error which inserts a semicolon at column 12, just after the 't' in 'Int'. + + * Insertions are represented by identical start and end locations: `{{3-3=@objc }}`. Deletions are represented by empty replacement text: `{{3-9=}}`. + + * Line offsets are also permitted; for instance, `{{-1:12-+1:42=}}` would specify a fix-it that deleted everything between column 12 on the previous line and column 42 on the next line. (The second location could just be written as `1:42` and would mean the same thing.) + + * By default, the verifier ignores any fix-its that are *not* expected; the special `{{none}}` specifier tells it to verify that the diagnostic it's attached to has *only* the fix-its specified and no others. + + * If two (or more) expected fix-its are juxtaposed with nothing (or whitespace) between them, then both must be present for the verifier to match. If two (or more) expected fix-its have `||` between them, then one of them must be present for the verifier to match. `||` binds more tightly than juxtaposition: `{{1-1=a}} {{2-2=b}} || {{2-2=c}} {{3-3=d}} {{none}}` will only match if there is either a set of three fix-its that insert `a`, `b`, and `d`, or a set of three fix-its that insert `a`, `c`, and `d`. (Without the `{{none}}`, it would also permit all four fix-its, but only because one of the four would be unmatched and ignored.) - (Optional) Expected educational notes. These appear as a comma separated list after the expected message, enclosed in double curly braces and prefixed by 'educational-notes='. For example, `{{educational-notes=some-note,some-other-note}}` will verify the educational notes with filenames `some-note` and `some-other-note` appear. Do not include the file extension when specifying note names. 
From 132620a595c49f79c1bbbcbf10a0c1fb98b89456 Mon Sep 17 00:00:00 2001 From: Becca Royal-Gordon Date: Mon, 21 Mar 2022 15:34:41 -0700 Subject: [PATCH 39/88] Correct description of fix-it line numbers --- docs/Diagnostics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Diagnostics.md b/docs/Diagnostics.md index 8dd20fabd7e9c..9f8f0439e3d93 100644 --- a/docs/Diagnostics.md +++ b/docs/Diagnostics.md @@ -156,7 +156,7 @@ An expected diagnostic is denoted by a comment which begins with `expected-error * Insertions are represented by identical start and end locations: `{{3-3=@objc }}`. Deletions are represented by empty replacement text: `{{3-9=}}`. - * Line offsets are also permitted; for instance, `{{-1:12-+1:42=}}` would specify a fix-it that deleted everything between column 12 on the previous line and column 42 on the next line. (The second location could just be written as `1:42` and would mean the same thing.) + * Line offsets are also permitted; for instance, `{{-1:12-+1:42=}}` would specify a fix-it that deleted everything between column 12 on the previous line and column 42 on the next line. (If the sign is omitted, it specifies an absolute line number, not an offset.) * By default, the verifier ignores any fix-its that are *not* expected; the special `{{none}}` specifier tells it to verify that the diagnostic it's attached to has *only* the fix-its specified and no others. From 3597652960253685386103751dad1837fe410213 Mon Sep 17 00:00:00 2001 From: Alex Hoppen Date: Mon, 21 Mar 2022 23:41:26 +0100 Subject: [PATCH 40/88] [CodeCompletion] Make sawSolution non-final and override sawSolutionImpl from subclasses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This eliminates a source of bugs if subclasses of `TypeCheckCompletionCallback` forget to call the superclass’s implementation of `sawSolution` from their overridden method. 
--- include/swift/IDE/ArgumentCompletion.h | 4 ++-- include/swift/IDE/DotExprCompletion.h | 4 ++-- include/swift/IDE/ExprCompletion.h | 4 ++-- include/swift/IDE/KeyPathCompletion.h | 4 ++-- include/swift/IDE/TypeCheckCompletionCallback.h | 8 +++++++- include/swift/IDE/UnresolvedMemberCompletion.h | 4 ++-- lib/IDE/ArgumentCompletion.cpp | 4 +--- lib/IDE/DotExprCompletion.cpp | 3 +-- lib/IDE/ExprCompletion.cpp | 4 +--- lib/IDE/KeyPathCompletion.cpp | 4 +--- lib/IDE/UnresolvedMemberCompletion.cpp | 4 +--- 11 files changed, 22 insertions(+), 25 deletions(-) diff --git a/include/swift/IDE/ArgumentCompletion.h b/include/swift/IDE/ArgumentCompletion.h index ba3f365d147f6..46aa5aaaf77d4 100644 --- a/include/swift/IDE/ArgumentCompletion.h +++ b/include/swift/IDE/ArgumentCompletion.h @@ -63,13 +63,13 @@ class ArgumentTypeCheckCompletionCallback : public TypeCheckCompletionCallback { SmallVectorImpl &Params, SmallVectorImpl &Types); + void sawSolutionImpl(const constraints::Solution &solution) override; + public: ArgumentTypeCheckCompletionCallback(CodeCompletionExpr *CompletionExpr, DeclContext *DC) : CompletionExpr(CompletionExpr), DC(DC) {} - void sawSolution(const constraints::Solution &solution) override; - /// \param IncludeSignature Whether to include a suggestion for the entire /// function signature instead of suggesting individual labels. 
Used when /// completing after the opening '(' of a function call \param Loc The diff --git a/include/swift/IDE/DotExprCompletion.h b/include/swift/IDE/DotExprCompletion.h index 72e31481b37f4..338a6a8dca129 100644 --- a/include/swift/IDE/DotExprCompletion.h +++ b/include/swift/IDE/DotExprCompletion.h @@ -43,6 +43,8 @@ class DotExprTypeCheckCompletionCallback : public TypeCheckCompletionCallback { SmallVector Results; llvm::DenseMap, size_t> BaseToSolutionIdx; + void sawSolutionImpl(const constraints::Solution &solution) override; + public: DotExprTypeCheckCompletionCallback(CodeCompletionExpr *CompletionExpr, DeclContext *DC) @@ -52,8 +54,6 @@ class DotExprTypeCheckCompletionCallback : public TypeCheckCompletionCallback { /// \c sawSolution for each solution formed. void fallbackTypeCheck(DeclContext *DC) override; - void sawSolution(const constraints::Solution &solution) override; - void deliverResults(Expr *BaseExpr, DeclContext *DC, SourceLoc DotLoc, bool IsInSelector, CodeCompletionContext &CompletionCtx, CodeCompletionConsumer &Consumer); diff --git a/include/swift/IDE/ExprCompletion.h b/include/swift/IDE/ExprCompletion.h index f8475b2a37a90..b98294e3ff29f 100644 --- a/include/swift/IDE/ExprCompletion.h +++ b/include/swift/IDE/ExprCompletion.h @@ -46,14 +46,14 @@ class ExprTypeCheckCompletionCallback : public TypeCheckCompletionCallback { SmallVector Results; + void sawSolutionImpl(const constraints::Solution &solution) override; + public: /// \param DC The decl context in which the \p CompletionExpr occurs. ExprTypeCheckCompletionCallback(CodeCompletionExpr *CompletionExpr, DeclContext *DC) : CompletionExpr(CompletionExpr), DC(DC) {} - void sawSolution(const constraints::Solution &solution) override; - /// \param CCLoc The location of the code completion token. 
void deliverResults(SourceLoc CCLoc, ide::CodeCompletionContext &CompletionCtx, diff --git a/include/swift/IDE/KeyPathCompletion.h b/include/swift/IDE/KeyPathCompletion.h index cdd70e0f15502..b2f001de11cbc 100644 --- a/include/swift/IDE/KeyPathCompletion.h +++ b/include/swift/IDE/KeyPathCompletion.h @@ -32,11 +32,11 @@ class KeyPathTypeCheckCompletionCallback : public TypeCheckCompletionCallback { KeyPathExpr *KeyPath; SmallVector Results; + void sawSolutionImpl(const constraints::Solution &solution) override; + public: KeyPathTypeCheckCompletionCallback(KeyPathExpr *KeyPath) : KeyPath(KeyPath) {} - void sawSolution(const constraints::Solution &solution) override; - void deliverResults(DeclContext *DC, SourceLoc DotLoc, ide::CodeCompletionContext &CompletionCtx, CodeCompletionConsumer &Consumer); diff --git a/include/swift/IDE/TypeCheckCompletionCallback.h b/include/swift/IDE/TypeCheckCompletionCallback.h index de4d762fc0799..ff12d1c304b59 100644 --- a/include/swift/IDE/TypeCheckCompletionCallback.h +++ b/include/swift/IDE/TypeCheckCompletionCallback.h @@ -41,13 +41,19 @@ namespace ide { class TypeCheckCompletionCallback { bool GotCallback = false; +protected: + /// Subclasses of \c TypeCheckCompletionCallback handle solutions discovered + /// by the constraint system in this function + virtual void sawSolutionImpl(const constraints::Solution &solution) = 0; + public: virtual ~TypeCheckCompletionCallback() {} /// Called for each solution produced while type-checking an expression /// that the code completion expression participates in. 
- virtual void sawSolution(const constraints::Solution &solution) { + void sawSolution(const constraints::Solution &solution) { GotCallback = true; + sawSolutionImpl(solution); }; /// True if at least one solution was passed via the \c sawSolution diff --git a/include/swift/IDE/UnresolvedMemberCompletion.h b/include/swift/IDE/UnresolvedMemberCompletion.h index 94771a8f2655e..e39fc4f30034e 100644 --- a/include/swift/IDE/UnresolvedMemberCompletion.h +++ b/include/swift/IDE/UnresolvedMemberCompletion.h @@ -40,13 +40,13 @@ class UnresolvedMemberTypeCheckCompletionCallback SmallVector ExprResults; SmallVector EnumPatternTypes; + void sawSolutionImpl(const constraints::Solution &solution) override; + public: UnresolvedMemberTypeCheckCompletionCallback( CodeCompletionExpr *CompletionExpr, DeclContext *DC) : CompletionExpr(CompletionExpr), DC(DC) {} - void sawSolution(const constraints::Solution &solution) override; - void deliverResults(DeclContext *DC, SourceLoc DotLoc, ide::CodeCompletionContext &CompletionCtx, CodeCompletionConsumer &Consumer); diff --git a/lib/IDE/ArgumentCompletion.cpp b/lib/IDE/ArgumentCompletion.cpp index 432177a0dfdbf..7e49600a72b5d 100644 --- a/lib/IDE/ArgumentCompletion.cpp +++ b/lib/IDE/ArgumentCompletion.cpp @@ -90,9 +90,7 @@ bool ArgumentTypeCheckCompletionCallback::addPossibleParams( return ShowGlobalCompletions; } -void ArgumentTypeCheckCompletionCallback::sawSolution(const Solution &S) { - TypeCheckCompletionCallback::sawSolution(S); - +void ArgumentTypeCheckCompletionCallback::sawSolutionImpl(const Solution &S) { Type ExpectedTy = getTypeForCompletion(S, CompletionExpr); auto &CS = S.getConstraintSystem(); diff --git a/lib/IDE/DotExprCompletion.cpp b/lib/IDE/DotExprCompletion.cpp index 6865fef97ccdf..80eae8a74a45b 100644 --- a/lib/IDE/DotExprCompletion.cpp +++ b/lib/IDE/DotExprCompletion.cpp @@ -44,9 +44,8 @@ void DotExprTypeCheckCompletionCallback::fallbackTypeCheck(DeclContext *DC) { [&](const Solution &S) { sawSolution(S); }); } -void 
DotExprTypeCheckCompletionCallback::sawSolution( +void DotExprTypeCheckCompletionCallback::sawSolutionImpl( const constraints::Solution &S) { - TypeCheckCompletionCallback::sawSolution(S); auto &CS = S.getConstraintSystem(); auto *ParsedExpr = CompletionExpr->getBase(); auto *SemanticExpr = ParsedExpr->getSemanticsProvidingExpr(); diff --git a/lib/IDE/ExprCompletion.cpp b/lib/IDE/ExprCompletion.cpp index 59d4464d7f395..43ba84a7dedc4 100644 --- a/lib/IDE/ExprCompletion.cpp +++ b/lib/IDE/ExprCompletion.cpp @@ -19,10 +19,8 @@ using namespace swift; using namespace swift::ide; using namespace swift::constraints; -void ExprTypeCheckCompletionCallback::sawSolution( +void ExprTypeCheckCompletionCallback::sawSolutionImpl( const constraints::Solution &S) { - TypeCheckCompletionCallback::sawSolution(S); - auto &CS = S.getConstraintSystem(); Type ExpectedTy = getTypeForCompletion(S, CompletionExpr); diff --git a/lib/IDE/KeyPathCompletion.cpp b/lib/IDE/KeyPathCompletion.cpp index 2cc2944aa1874..1588bfbf837bc 100644 --- a/lib/IDE/KeyPathCompletion.cpp +++ b/lib/IDE/KeyPathCompletion.cpp @@ -19,10 +19,8 @@ using namespace swift; using namespace swift::constraints; using namespace swift::ide; -void KeyPathTypeCheckCompletionCallback::sawSolution( +void KeyPathTypeCheckCompletionCallback::sawSolutionImpl( const constraints::Solution &S) { - TypeCheckCompletionCallback::sawSolution(S); - // Determine the code completion. 
size_t ComponentIndex = 0; for (auto &Component : KeyPath->getComponents()) { diff --git a/lib/IDE/UnresolvedMemberCompletion.cpp b/lib/IDE/UnresolvedMemberCompletion.cpp index 3b96d2d79eaf0..0f6f4bb696bd3 100644 --- a/lib/IDE/UnresolvedMemberCompletion.cpp +++ b/lib/IDE/UnresolvedMemberCompletion.cpp @@ -80,10 +80,8 @@ static VarDecl *getMatchVarIfInPatternMatch(CodeCompletionExpr *CompletionExpr, } } -void UnresolvedMemberTypeCheckCompletionCallback::sawSolution( +void UnresolvedMemberTypeCheckCompletionCallback::sawSolutionImpl( const constraints::Solution &S) { - TypeCheckCompletionCallback::sawSolution(S); - auto &CS = S.getConstraintSystem(); Type ExpectedTy = getTypeForCompletion(S, CompletionExpr); From 045302b4cb153a27186b06973af315bc53e01e7a Mon Sep 17 00:00:00 2001 From: Alex Hoppen Date: Mon, 21 Mar 2022 12:57:02 +0100 Subject: [PATCH 41/88] [CodeCompletion] Filter overloads if their function application doesn't contain the code completion token When solving for code completion, we weren't disabling overloads because the call might be malformed in the presence of a code completion token (because the user is only now writing the function call). But this logic doesn't apply to function calls that don't even involve the code completion token, which happens if completing in result builders. I am hoping that this significantly improves code completion performance inside result builders. --- lib/Sema/CSSimplify.cpp | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/Sema/CSSimplify.cpp b/lib/Sema/CSSimplify.cpp index c7b1018c6c4df..b8bafe16e6aa5 100644 --- a/lib/Sema/CSSimplify.cpp +++ b/lib/Sema/CSSimplify.cpp @@ -10771,8 +10771,12 @@ bool ConstraintSystem::simplifyAppliedOverloadsImpl( // Don't attempt to filter overloads when solving for code completion // because presence of code completion token means that any call // could be malformed e.g. missing arguments e.g.
`foo([.#^MEMBER^#` - if (isForCodeCompletion()) - return false; + if (isForCodeCompletion()) { + bool ArgContainsCCTypeVar = Type(argFnType).findIf(isCodeCompletionTypeVar); + if (ArgContainsCCTypeVar || isCodeCompletionTypeVar(fnTypeVar)) { + return false; + } + } if (shouldAttemptFixes()) { auto arguments = argFnType->getParams(); From 1c07c50cf8a19a856ffc2f7f4525574f2e5d7d72 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 22 Mar 2022 11:23:11 -0700 Subject: [PATCH 42/88] Fix a minor (1 per process) leak in AccessEnforcementSelection This fixes a commit from a few days ago where I meant to call unique_ptr::reset() instead of unique_ptr::release() and apparently didn't notice the compiler warning. --- lib/SILOptimizer/Mandatory/AccessEnforcementSelection.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/SILOptimizer/Mandatory/AccessEnforcementSelection.cpp b/lib/SILOptimizer/Mandatory/AccessEnforcementSelection.cpp index 7e395bfbe6767..191d612a2e1dc 100644 --- a/lib/SILOptimizer/Mandatory/AccessEnforcementSelection.cpp +++ b/lib/SILOptimizer/Mandatory/AccessEnforcementSelection.cpp @@ -614,7 +614,7 @@ void AccessEnforcementSelection::run() { closureOrder.compute(); dynamicCaptures = std::make_unique(closureOrder); - SWIFT_DEFER { dynamicCaptures.release(); }; + SWIFT_DEFER { dynamicCaptures.reset(); }; for (SILFunction *function : closureOrder.getTopDownFunctions()) { this->processFunction(function); From b373bd62de4290b5b70536615d67a71e70a77be9 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 15:01:26 -0400 Subject: [PATCH 43/88] Sema: Don't verify protocol requirement signatures if there was an error --- lib/Sema/TypeCheckDeclPrimary.cpp | 47 ++++++++++++++++--------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/lib/Sema/TypeCheckDeclPrimary.cpp b/lib/Sema/TypeCheckDeclPrimary.cpp index 3d80fe0add0f0..b00004fda1288 100644 --- a/lib/Sema/TypeCheckDeclPrimary.cpp +++ 
b/lib/Sema/TypeCheckDeclPrimary.cpp @@ -2653,7 +2653,7 @@ class DeclChecker : public DeclVisitor { // Do this before visiting members, to avoid a request cycle if // a member referenecs another declaration whose generic signature // has a conformance requirement to this protocol. - auto reqSig = PD->getRequirementSignature().getRequirements(); + auto reqSig = PD->getRequirementSignature(); // Check the members. for (auto Member : PD->getMembers()) @@ -2669,8 +2669,9 @@ class DeclChecker : public DeclVisitor { TypeChecker::inferDefaultWitnesses(PD); if (PD->getASTContext().TypeCheckerOpts.DebugGenericSignatures) { - auto requirementsSig = - GenericSignature::get({PD->getProtocolSelfType()}, reqSig); + auto sig = + GenericSignature::get({PD->getProtocolSelfType()}, + reqSig.getRequirements()); llvm::errs() << "\n"; llvm::errs() << "Protocol requirement signature:\n"; @@ -2679,34 +2680,34 @@ class DeclChecker : public DeclVisitor { llvm::errs() << "Requirement signature: "; PrintOptions Opts; Opts.ProtocolQualifiedDependentMemberTypes = true; - requirementsSig->print(llvm::errs(), Opts); + sig->print(llvm::errs(), Opts); llvm::errs() << "\n"; llvm::errs() << "Canonical requirement signature: "; - auto canRequirementSig = - CanGenericSignature::getCanonical(requirementsSig.getGenericParams(), - requirementsSig.getRequirements()); - canRequirementSig->print(llvm::errs(), Opts); + auto canSig = + CanGenericSignature::getCanonical(sig.getGenericParams(), + sig.getRequirements()); + canSig->print(llvm::errs(), Opts); llvm::errs() << "\n"; } - if (getASTContext().LangOpts.RequirementMachineProtocolSignatures == - RequirementMachineMode::Disabled) { - #ifndef NDEBUG - // The GenericSignatureBuilder outputs incorrectly-minimized signatures - // sometimes, so only check invariants in asserts builds. - PD->getGenericSignature().verify(reqSig); - #endif - } else { - // When using the Requirement Machine, always verify signatures. 
- // An incorrect signature indicates a serious problem which can cause - // miscompiles or inadvertent ABI dependencies on compiler bugs, so - // we really want to avoid letting one slip by. - PD->getGenericSignature().verify(reqSig); + if (!reqSig.getErrors()) { + if (getASTContext().LangOpts.RequirementMachineProtocolSignatures == + RequirementMachineMode::Disabled) { + #ifndef NDEBUG + // The GenericSignatureBuilder outputs incorrectly-minimized signatures + // sometimes, so only check invariants in asserts builds. + PD->getGenericSignature().verify(reqSig.getRequirements()); + #endif + } else { + // When using the Requirement Machine, always verify signatures. + // An incorrect signature indicates a serious problem which can cause + // miscompiles or inadvertent ABI dependencies on compiler bugs, so + // we really want to avoid letting one slip by. + PD->getGenericSignature().verify(reqSig.getRequirements()); + } } - (void) reqSig; - checkExplicitAvailability(PD); } From 4069434100120c9d7c1a5bed4edf7ce572fde78b Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Mon, 21 Mar 2022 17:42:38 -0400 Subject: [PATCH 44/88] RequirementMachine: Preserve sugar when splitting concrete equivalence classes Instead of kicking off an AbstractGenericSignatureRequest recursively, handle the rebuilding in a loop in {Abstract,Inferred}GenericSignatureRequest. This also avoids an unnecessary call to verify() when rebuilding. 
--- .../RequirementMachineRequests.cpp | 167 +++++++++--------- test/Generics/reconstitute_sugar.swift | 14 ++ 2 files changed, 102 insertions(+), 79 deletions(-) diff --git a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index 2f14a20e4355f..f0221cf7ca3b8 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -254,6 +254,10 @@ static bool shouldSplitConcreteEquivalenceClass(Requirement req, sig->isConcreteType(req.getSecondType())); } +/// Returns true if this generic signature contains abstract same-type +/// requirements between concrete type parameters. In this case, we split +/// the abstract same-type requirements into pairs of concrete type +/// requirements, and minimize the signature again. static bool shouldSplitConcreteEquivalenceClasses(GenericSignature sig) { for (auto req : sig.getRequirements()) { if (shouldSplitConcreteEquivalenceClass(req, sig)) @@ -263,9 +267,16 @@ static bool shouldSplitConcreteEquivalenceClasses(GenericSignature sig) { return false; } -static GenericSignature splitConcreteEquivalenceClasses( - GenericSignature sig, ASTContext &ctx) { - SmallVector reqs; +/// Replace each same-type requirement 'T == U' where 'T' (and therefore 'U') +/// is known to equal a concrete type 'C' with a pair of requirements +/// 'T == C' and 'U == C'. We build the signature again in this case, since +/// one of the two requirements will be redundant, but we don't know which +/// ahead of time. 
+static void splitConcreteEquivalenceClasses( + ASTContext &ctx, + GenericSignature sig, + SmallVectorImpl &requirements) { + requirements.clear(); for (auto req : sig.getRequirements()) { if (shouldSplitConcreteEquivalenceClass(req, sig)) { @@ -273,27 +284,17 @@ static GenericSignature splitConcreteEquivalenceClasses( sig.getCanonicalTypeInContext( req.getSecondType())); - reqs.emplace_back(RequirementKind::SameType, - req.getFirstType(), - canType); - reqs.emplace_back(RequirementKind::SameType, - req.getSecondType(), - canType); - } else { - reqs.push_back(req); + Requirement firstReq(RequirementKind::SameType, + req.getFirstType(), canType); + Requirement secondReq(RequirementKind::SameType, + req.getSecondType(), canType); + requirements.push_back({firstReq, SourceLoc(), /*inferred=*/false}); + requirements.push_back({secondReq, SourceLoc(), /*inferred=*/false}); + continue; } - } - - SmallVector genericParams; - genericParams.append(sig.getGenericParams().begin(), - sig.getGenericParams().end()); - return evaluateOrDefault( - ctx.evaluator, - AbstractGenericSignatureRequestRQM{ - /*baseSignature=*/nullptr, - genericParams, reqs}, - GenericSignatureWithError()).getPointer(); + requirements.push_back({req, SourceLoc(), /*inferred=*/false}); + } } GenericSignatureWithError @@ -427,32 +428,36 @@ AbstractGenericSignatureRequestRQM::evaluate( } } - // Heap-allocate the requirement machine to save stack space. - std::unique_ptr machine(new RequirementMachine( - ctx.getRewriteContext())); + for (;;) { + // Heap-allocate the requirement machine to save stack space. 
+ std::unique_ptr machine(new RequirementMachine( + ctx.getRewriteContext())); - auto status = - machine->initWithWrittenRequirements(genericParams, requirements); - machine->checkCompletionResult(status.first); + auto status = + machine->initWithWrittenRequirements(genericParams, requirements); + machine->checkCompletionResult(status.first); - // We pass reconstituteSugar=false to ensure that if the original - // requirements were canonical, the final signature remains canonical. - auto minimalRequirements = - machine->computeMinimalGenericSignatureRequirements( - /*reconstituteSugar=*/false); + // We pass reconstituteSugar=false to ensure that if the original + // requirements were canonical, the final signature remains canonical. + auto minimalRequirements = + machine->computeMinimalGenericSignatureRequirements( + /*reconstituteSugar=*/false); - auto result = GenericSignature::get(genericParams, minimalRequirements); - auto errorFlags = machine->getErrors(); + auto result = GenericSignature::get(genericParams, minimalRequirements); + auto errorFlags = machine->getErrors(); - if (!errorFlags) { - if (shouldSplitConcreteEquivalenceClasses(result)) - result = splitConcreteEquivalenceClasses(result, ctx); + if (!errorFlags) { + if (shouldSplitConcreteEquivalenceClasses(result)) { + splitConcreteEquivalenceClasses(ctx, result, requirements); + continue; + } - // Check invariants. - result.verify(); - } + // Check invariants. + result.verify(); + } - return GenericSignatureWithError(result, errorFlags); + return GenericSignatureWithError(result, errorFlags); + } } GenericSignatureWithError @@ -563,51 +568,55 @@ InferredGenericSignatureRequestRQM::evaluate( } } - // Heap-allocate the requirement machine to save stack space. - std::unique_ptr machine(new RequirementMachine( - ctx.getRewriteContext())); + for (;;) { + // Heap-allocate the requirement machine to save stack space. 
+ std::unique_ptr machine(new RequirementMachine( + ctx.getRewriteContext())); - auto status = - machine->initWithWrittenRequirements(genericParams, requirements); - if (status.first != CompletionResult::Success) { - ctx.Diags.diagnose(loc, - diag::requirement_machine_completion_failed, - /*protocol=*/0, - unsigned(status.first)); + auto status = + machine->initWithWrittenRequirements(genericParams, requirements); + if (status.first != CompletionResult::Success) { + ctx.Diags.diagnose(loc, + diag::requirement_machine_completion_failed, + /*protocol=*/0, + unsigned(status.first)); - auto rule = machine->getRuleAsStringForDiagnostics(status.second); - ctx.Diags.diagnose(loc, - diag::requirement_machine_completion_rule, - rule); + auto rule = machine->getRuleAsStringForDiagnostics(status.second); + ctx.Diags.diagnose(loc, + diag::requirement_machine_completion_rule, + rule); - auto result = GenericSignature::get(genericParams, - parentSig.getRequirements()); - return GenericSignatureWithError( - result, GenericSignatureErrorFlags::CompletionFailed); - } + auto result = GenericSignature::get(genericParams, + parentSig.getRequirements()); + return GenericSignatureWithError( + result, GenericSignatureErrorFlags::CompletionFailed); + } - auto minimalRequirements = - machine->computeMinimalGenericSignatureRequirements( - /*reconstituteSugar=*/true); + auto minimalRequirements = + machine->computeMinimalGenericSignatureRequirements( + /*reconstituteSugar=*/true); - auto result = GenericSignature::get(genericParams, minimalRequirements); - auto errorFlags = machine->getErrors(); + auto result = GenericSignature::get(genericParams, minimalRequirements); + auto errorFlags = machine->getErrors(); - if (ctx.LangOpts.RequirementMachineInferredSignatures == - RequirementMachineMode::Enabled) { - machine->System.computeRedundantRequirementDiagnostics(errors); - diagnoseRequirementErrors(ctx, errors, allowConcreteGenericParams); - } + if 
(ctx.LangOpts.RequirementMachineInferredSignatures == + RequirementMachineMode::Enabled) { + machine->System.computeRedundantRequirementDiagnostics(errors); + diagnoseRequirementErrors(ctx, errors, allowConcreteGenericParams); + } - // FIXME: Handle allowConcreteGenericParams + // FIXME: Handle allowConcreteGenericParams - if (!errorFlags) { - if (shouldSplitConcreteEquivalenceClasses(result)) - result = splitConcreteEquivalenceClasses(result, ctx); + if (!errorFlags) { + if (shouldSplitConcreteEquivalenceClasses(result)) { + splitConcreteEquivalenceClasses(ctx, result, requirements); + continue; + } - // Check invariants. - result.verify(); - } + // Check invariants. + result.verify(); + } - return GenericSignatureWithError(result, errorFlags); + return GenericSignatureWithError(result, errorFlags); + } } diff --git a/test/Generics/reconstitute_sugar.swift b/test/Generics/reconstitute_sugar.swift index 77158c184820d..1a08c9f252c38 100644 --- a/test/Generics/reconstitute_sugar.swift +++ b/test/Generics/reconstitute_sugar.swift @@ -45,3 +45,17 @@ extension G where X : C> {} // CHECK-LABEL: ExtensionDecl line={{.*}} base=G // CHECK-NEXT: Generic signature: > extension G where X : C<()> {} + +// Make sure we reconstitute sugar when splitting concrete +// equivalence classes too. 
+ +protocol P { + associatedtype T where T == [U] + associatedtype U +} + +struct G2 {} + +// CHECK-LABEL: ExtensionDecl line={{.*}} base=G2 +// CHECK-NEXT: Generic signature: +extension G2 where T2.U == [Int], T1.T == T2.T {} \ No newline at end of file From 9ccdd15d586707bde6141aad3ae66dd2a67eadb1 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Mon, 21 Mar 2022 18:01:04 -0400 Subject: [PATCH 45/88] RequirementMachine: Add upper bound on number of attempts at splitting concrete equivalence classes --- include/swift/Basic/LangOptions.h | 4 ++++ include/swift/Option/FrontendOptions.td | 6 +++++ .../RequirementMachineRequests.cpp | 24 +++++++++++++++---- lib/Frontend/CompilerInvocation.cpp | 11 +++++++++ 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/include/swift/Basic/LangOptions.h b/include/swift/Basic/LangOptions.h index 9c147c093bcff..f1f29e4c57a55 100644 --- a/include/swift/Basic/LangOptions.h +++ b/include/swift/Basic/LangOptions.h @@ -525,6 +525,10 @@ namespace swift { /// algorithm. unsigned RequirementMachineMaxConcreteNesting = 30; + /// Maximum number of attempts to make when splitting concrete equivalence + /// classes. + unsigned RequirementMachineMaxSplitConcreteEquivClassAttempts = 2; + /// Enable the new experimental protocol requirement signature minimization /// algorithm. 
RequirementMachineMode RequirementMachineProtocolSignatures = diff --git a/include/swift/Option/FrontendOptions.td b/include/swift/Option/FrontendOptions.td index 4f1f9d2fef01d..636fd9851ec34 100644 --- a/include/swift/Option/FrontendOptions.td +++ b/include/swift/Option/FrontendOptions.td @@ -355,6 +355,12 @@ def requirement_machine_max_concrete_nesting : Joined<["-"], "requirement-machin Flags<[FrontendOption, HelpHidden, DoesNotAffectIncrementalBuild]>, HelpText<"Set the maximum concrete type nesting depth before giving up">; +def requirement_machine_max_split_concrete_equiv_class_attempts : Joined<["-"], "requirement-machine-max-split-concrete-equiv-class-attempts=">, + Flags<[FrontendOption, HelpHidden, DoesNotAffectIncrementalBuild]>, + HelpText<"Set the maximum concrete number of attempts at splitting " + "concrete equivalence classes before giving up. There should " + "never be a reason to change this">; + def disable_requirement_machine_concrete_contraction : Flag<["-"], "disable-requirement-machine-concrete-contraction">, HelpText<"Disable preprocessing pass to eliminate conformance requirements " "on generic parameters which are made concrete">; diff --git a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index f0221cf7ca3b8..12786bf71fc13 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -275,7 +275,19 @@ static bool shouldSplitConcreteEquivalenceClasses(GenericSignature sig) { static void splitConcreteEquivalenceClasses( ASTContext &ctx, GenericSignature sig, - SmallVectorImpl &requirements) { + SmallVectorImpl &requirements, + unsigned &attempt) { + unsigned maxAttempts = + ctx.LangOpts.RequirementMachineMaxSplitConcreteEquivClassAttempts; + + ++attempt; + if (attempt >= maxAttempts) { + llvm::errs() << "Splitting concrete equivalence classes did not " + << "reach fixed point after " << attempt 
<< " attempts.\n"; + llvm::errs() << "Last result: " << sig << "\n"; + abort(); + } + requirements.clear(); for (auto req : sig.getRequirements()) { @@ -428,6 +440,7 @@ AbstractGenericSignatureRequestRQM::evaluate( } } + unsigned attempt = 0; for (;;) { // Heap-allocate the requirement machine to save stack space. std::unique_ptr machine(new RequirementMachine( @@ -448,7 +461,7 @@ AbstractGenericSignatureRequestRQM::evaluate( if (!errorFlags) { if (shouldSplitConcreteEquivalenceClasses(result)) { - splitConcreteEquivalenceClasses(ctx, result, requirements); + splitConcreteEquivalenceClasses(ctx, result, requirements, attempt); continue; } @@ -568,6 +581,7 @@ InferredGenericSignatureRequestRQM::evaluate( } } + unsigned attempt = 0; for (;;) { // Heap-allocate the requirement machine to save stack space. std::unique_ptr machine(new RequirementMachine( @@ -599,7 +613,8 @@ InferredGenericSignatureRequestRQM::evaluate( auto result = GenericSignature::get(genericParams, minimalRequirements); auto errorFlags = machine->getErrors(); - if (ctx.LangOpts.RequirementMachineInferredSignatures == + if (attempt == 0 && + ctx.LangOpts.RequirementMachineInferredSignatures == RequirementMachineMode::Enabled) { machine->System.computeRedundantRequirementDiagnostics(errors); diagnoseRequirementErrors(ctx, errors, allowConcreteGenericParams); @@ -608,8 +623,9 @@ InferredGenericSignatureRequestRQM::evaluate( // FIXME: Handle allowConcreteGenericParams if (!errorFlags) { + // Check if we need to rebuild the signature. 
if (shouldSplitConcreteEquivalenceClasses(result)) { - splitConcreteEquivalenceClasses(ctx, result, requirements); + splitConcreteEquivalenceClasses(ctx, result, requirements, attempt); continue; } diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp index 8f6978be44907..22e814c310295 100644 --- a/lib/Frontend/CompilerInvocation.cpp +++ b/lib/Frontend/CompilerInvocation.cpp @@ -998,6 +998,17 @@ static bool ParseLangArgs(LangOptions &Opts, ArgList &Args, } } + if (const Arg *A = Args.getLastArg(OPT_requirement_machine_max_split_concrete_equiv_class_attempts)) { + unsigned limit; + if (StringRef(A->getValue()).getAsInteger(10, limit)) { + Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value, + A->getAsString(Args), A->getValue()); + HadError = true; + } else { + Opts.RequirementMachineMaxSplitConcreteEquivClassAttempts = limit; + } + } + if (Args.hasArg(OPT_disable_requirement_machine_concrete_contraction)) Opts.EnableRequirementMachineConcreteContraction = false; From bf779d31a06155e7de8b43bbe7176e84c4101424 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Mon, 21 Mar 2022 21:34:03 -0400 Subject: [PATCH 46/88] RequirementMachine: Allow query operations to be invoked on requirement machine instances for fresh signatures --- lib/AST/RequirementMachine/GenericSignatureQueries.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/AST/RequirementMachine/GenericSignatureQueries.cpp b/lib/AST/RequirementMachine/GenericSignatureQueries.cpp index c7f113ea14ffb..7b701d28eadd3 100644 --- a/lib/AST/RequirementMachine/GenericSignatureQueries.cpp +++ b/lib/AST/RequirementMachine/GenericSignatureQueries.cpp @@ -683,9 +683,11 @@ void RequirementMachine::verify(const MutableTerm &term) const { if (term.begin()->getKind() == Symbol::Kind::GenericParam) { auto *genericParam = term.begin()->getGenericParam(); TypeArrayView genericParams = getGenericParams(); - auto found = std::find(genericParams.begin(), - 
genericParams.end(), - genericParam); + auto found = std::find_if(genericParams.begin(), + genericParams.end(), + [&](GenericTypeParamType *otherType) { + return genericParam->isEqual(otherType); + }); if (found == genericParams.end()) { llvm::errs() << "Bad generic parameter in " << term << "\n"; dump(llvm::errs()); From 24b662427551671586dea334a54068470bd321e7 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 11:52:46 -0400 Subject: [PATCH 47/88] RequirementMachine: Move some code around in RequirementMachineRequests.cpp --- .../RequirementMachineRequests.cpp | 132 +++++++++--------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index 12786bf71fc13..d8f854e7734df 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -36,6 +36,72 @@ using namespace swift; using namespace rewriting; +/// Hack for GenericSignatureBuilder compatibility. We might end up with a +/// same-type requirement between type parameters where one of them has an +/// implied concrete type requirement. In this case, split it up into two +/// concrete type requirements. +static bool shouldSplitConcreteEquivalenceClass(Requirement req, + GenericSignature sig) { + return (req.getKind() == RequirementKind::SameType && + req.getSecondType()->isTypeParameter() && + sig->isConcreteType(req.getSecondType())); +} + +/// Returns true if this generic signature contains abstract same-type +/// requirements between concrete type parameters. In this case, we split +/// the abstract same-type requirements into pairs of concrete type +/// requirements, and minimize the signature again. 
+static bool shouldSplitConcreteEquivalenceClasses(GenericSignature sig) { + for (auto req : sig.getRequirements()) { + if (shouldSplitConcreteEquivalenceClass(req, sig)) + return true; + } + + return false; +} + +/// Replace each same-type requirement 'T == U' where 'T' (and therefore 'U') +/// is known to equal a concrete type 'C' with a pair of requirements +/// 'T == C' and 'U == C'. We build the signature again in this case, since +/// one of the two requirements will be redundant, but we don't know which +/// ahead of time. +static void splitConcreteEquivalenceClasses( + ASTContext &ctx, + GenericSignature sig, + SmallVectorImpl &requirements, + unsigned &attempt) { + unsigned maxAttempts = + ctx.LangOpts.RequirementMachineMaxSplitConcreteEquivClassAttempts; + + ++attempt; + if (attempt >= maxAttempts) { + llvm::errs() << "Splitting concrete equivalence classes did not " + << "reach fixed point after " << attempt << " attempts.\n"; + llvm::errs() << "Last result: " << sig << "\n"; + abort(); + } + + requirements.clear(); + + for (auto req : sig.getRequirements()) { + if (shouldSplitConcreteEquivalenceClass(req, sig)) { + auto canType = sig->getSugaredType( + sig.getCanonicalTypeInContext( + req.getSecondType())); + + Requirement firstReq(RequirementKind::SameType, + req.getFirstType(), canType); + Requirement secondReq(RequirementKind::SameType, + req.getSecondType(), canType); + requirements.push_back({firstReq, SourceLoc(), /*inferred=*/false}); + requirements.push_back({secondReq, SourceLoc(), /*inferred=*/false}); + continue; + } + + requirements.push_back({req, SourceLoc(), /*inferred=*/false}); + } +} + /// Builds the requirement signatures for each protocol in this strongly /// connected component. llvm::DenseMap @@ -243,72 +309,6 @@ static bool isCanonicalRequest(GenericSignature baseSignature, return true; } -/// Hack for GenericSignatureBuilder compatibility. 
We might end up with a -/// same-type requirement between type parameters where one of them has an -/// implied concrete type requirement. In this case, split it up into two -/// concrete type requirements. -static bool shouldSplitConcreteEquivalenceClass(Requirement req, - GenericSignature sig) { - return (req.getKind() == RequirementKind::SameType && - req.getSecondType()->isTypeParameter() && - sig->isConcreteType(req.getSecondType())); -} - -/// Returns true if this generic signature contains abstract same-type -/// requirements between concrete type parameters. In this case, we split -/// the abstract same-type requirements into pairs of concrete type -/// requirements, and minimize the signature again. -static bool shouldSplitConcreteEquivalenceClasses(GenericSignature sig) { - for (auto req : sig.getRequirements()) { - if (shouldSplitConcreteEquivalenceClass(req, sig)) - return true; - } - - return false; -} - -/// Replace each same-type requirement 'T == U' where 'T' (and therefore 'U') -/// is known to equal a concrete type 'C' with a pair of requirements -/// 'T == C' and 'U == C'. We build the signature again in this case, since -/// one of the two requirements will be redundant, but we don't know which -/// ahead of time. 
-static void splitConcreteEquivalenceClasses( - ASTContext &ctx, - GenericSignature sig, - SmallVectorImpl &requirements, - unsigned &attempt) { - unsigned maxAttempts = - ctx.LangOpts.RequirementMachineMaxSplitConcreteEquivClassAttempts; - - ++attempt; - if (attempt >= maxAttempts) { - llvm::errs() << "Splitting concrete equivalence classes did not " - << "reach fixed point after " << attempt << " attempts.\n"; - llvm::errs() << "Last result: " << sig << "\n"; - abort(); - } - - requirements.clear(); - - for (auto req : sig.getRequirements()) { - if (shouldSplitConcreteEquivalenceClass(req, sig)) { - auto canType = sig->getSugaredType( - sig.getCanonicalTypeInContext( - req.getSecondType())); - - Requirement firstReq(RequirementKind::SameType, - req.getFirstType(), canType); - Requirement secondReq(RequirementKind::SameType, - req.getSecondType(), canType); - requirements.push_back({firstReq, SourceLoc(), /*inferred=*/false}); - requirements.push_back({secondReq, SourceLoc(), /*inferred=*/false}); - continue; - } - - requirements.push_back({req, SourceLoc(), /*inferred=*/false}); - } -} - GenericSignatureWithError AbstractGenericSignatureRequestRQM::evaluate( Evaluator &evaluator, From 11b45ca2691506bc3b8cf00effea9029884d7419 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 12:17:13 -0400 Subject: [PATCH 48/88] RequirementMachine: splitConcreteEquivalenceClass() uses getConcreteType() instead of getCanonicalTypeInContext() --- lib/AST/RequirementMachine/RequirementMachineRequests.cpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index d8f854e7734df..1c654c75278ea 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -85,14 +85,12 @@ static void splitConcreteEquivalenceClasses( for (auto req : sig.getRequirements()) { if 
(shouldSplitConcreteEquivalenceClass(req, sig)) { - auto canType = sig->getSugaredType( - sig.getCanonicalTypeInContext( - req.getSecondType())); + auto concreteType = sig->getConcreteType(req.getSecondType()); Requirement firstReq(RequirementKind::SameType, - req.getFirstType(), canType); + req.getFirstType(), concreteType); Requirement secondReq(RequirementKind::SameType, - req.getSecondType(), canType); + req.getSecondType(), concreteType); requirements.push_back({firstReq, SourceLoc(), /*inferred=*/false}); requirements.push_back({secondReq, SourceLoc(), /*inferred=*/false}); continue; From 466d6a946889c6f7190f65df2bdeb22dbb93fa6e Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 12:17:15 -0400 Subject: [PATCH 49/88] RequirementMachine: Refactor shouldSplitConcreteEquivalenceClasses() and splitConcreteEquivalenceClasses() a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Note that this changes the behavior of a test slightly when the -disable-concrete-contraction flag is used. This is because we're not using the Requirement Machine that minimized the signature and not the Requirement Machine built from the minimized signature; the former includes a concrete conformance rule. The isConcreteType() query returns true on the former when given the generic parameter τ_0_0. Since -disable-concrete-contraction is only meant for debugging, I'm just removing that line from the test. 
--- .../RequirementMachineRequests.cpp | 57 ++++++++++++------- ..._conforms_with_default_concrete_self.swift | 1 - 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index 1c654c75278ea..dde9c4c44737f 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -40,20 +40,23 @@ using namespace rewriting; /// same-type requirement between type parameters where one of them has an /// implied concrete type requirement. In this case, split it up into two /// concrete type requirements. -static bool shouldSplitConcreteEquivalenceClass(Requirement req, - GenericSignature sig) { +static bool shouldSplitConcreteEquivalenceClass( + Requirement req, + const RequirementMachine *machine) { return (req.getKind() == RequirementKind::SameType && req.getSecondType()->isTypeParameter() && - sig->isConcreteType(req.getSecondType())); + machine->isConcreteType(req.getSecondType())); } /// Returns true if this generic signature contains abstract same-type /// requirements between concrete type parameters. In this case, we split /// the abstract same-type requirements into pairs of concrete type /// requirements, and minimize the signature again. -static bool shouldSplitConcreteEquivalenceClasses(GenericSignature sig) { - for (auto req : sig.getRequirements()) { - if (shouldSplitConcreteEquivalenceClass(req, sig)) +static bool shouldSplitConcreteEquivalenceClasses( + ArrayRef requirements, + const RequirementMachine *machine) { + for (auto req : requirements) { + if (shouldSplitConcreteEquivalenceClass(req, machine)) return true; } @@ -67,8 +70,10 @@ static bool shouldSplitConcreteEquivalenceClasses(GenericSignature sig) { /// ahead of time. 
static void splitConcreteEquivalenceClasses( ASTContext &ctx, - GenericSignature sig, - SmallVectorImpl &requirements, + ArrayRef requirements, + const RequirementMachine *machine, + TypeArrayView genericParams, + SmallVectorImpl &splitRequirements, unsigned &attempt) { unsigned maxAttempts = ctx.LangOpts.RequirementMachineMaxSplitConcreteEquivClassAttempts; @@ -77,26 +82,32 @@ static void splitConcreteEquivalenceClasses( if (attempt >= maxAttempts) { llvm::errs() << "Splitting concrete equivalence classes did not " << "reach fixed point after " << attempt << " attempts.\n"; - llvm::errs() << "Last result: " << sig << "\n"; + llvm::errs() << "Last attempt produced these requirements:\n"; + for (auto req : requirements) { + req.dump(llvm::errs()); + llvm::errs() << "\n"; + } + machine->dump(llvm::errs()); abort(); } - requirements.clear(); + splitRequirements.clear(); - for (auto req : sig.getRequirements()) { - if (shouldSplitConcreteEquivalenceClass(req, sig)) { - auto concreteType = sig->getConcreteType(req.getSecondType()); + for (auto req : requirements) { + if (shouldSplitConcreteEquivalenceClass(req, machine)) { + auto concreteType = machine->getConcreteType( + req.getSecondType(), genericParams); Requirement firstReq(RequirementKind::SameType, req.getFirstType(), concreteType); Requirement secondReq(RequirementKind::SameType, req.getSecondType(), concreteType); - requirements.push_back({firstReq, SourceLoc(), /*inferred=*/false}); - requirements.push_back({secondReq, SourceLoc(), /*inferred=*/false}); + splitRequirements.push_back({firstReq, SourceLoc(), /*inferred=*/false}); + splitRequirements.push_back({secondReq, SourceLoc(), /*inferred=*/false}); continue; } - requirements.push_back({req, SourceLoc(), /*inferred=*/false}); + splitRequirements.push_back({req, SourceLoc(), /*inferred=*/false}); } } @@ -458,8 +469,11 @@ AbstractGenericSignatureRequestRQM::evaluate( auto errorFlags = machine->getErrors(); if (!errorFlags) { - if 
(shouldSplitConcreteEquivalenceClasses(result)) { - splitConcreteEquivalenceClasses(ctx, result, requirements, attempt); + if (shouldSplitConcreteEquivalenceClasses(result.getRequirements(), + machine.get())) { + splitConcreteEquivalenceClasses(ctx, result.getRequirements(), + machine.get(), result.getGenericParams(), + requirements, attempt); continue; } @@ -622,8 +636,11 @@ InferredGenericSignatureRequestRQM::evaluate( if (!errorFlags) { // Check if we need to rebuild the signature. - if (shouldSplitConcreteEquivalenceClasses(result)) { - splitConcreteEquivalenceClasses(ctx, result, requirements, attempt); + if (shouldSplitConcreteEquivalenceClasses(result.getRequirements(), + machine.get())) { + splitConcreteEquivalenceClasses(ctx, result.getRequirements(), + machine.get(), result.getGenericParams(), + requirements, attempt); continue; } diff --git a/test/SILGen/class_conforms_with_default_concrete_self.swift b/test/SILGen/class_conforms_with_default_concrete_self.swift index e779f24bdc680..ae216dc88de94 100644 --- a/test/SILGen/class_conforms_with_default_concrete_self.swift +++ b/test/SILGen/class_conforms_with_default_concrete_self.swift @@ -1,5 +1,4 @@ // RUN: %target-swift-emit-silgen %s -requirement-machine-abstract-signatures=on | %FileCheck %s -// RUN: %target-swift-emit-silgen %s -requirement-machine-abstract-signatures=on -disable-requirement-machine-concrete-contraction | %FileCheck %s public protocol P { associatedtype A : Q where A.B == Self From 4446f2afcf2c11d4e61d5b41365699e2d356b014 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Mon, 21 Mar 2022 21:54:05 -0400 Subject: [PATCH 50/88] RequirementMachine: Move some code out of RuleBuilder and into RequirementSignatureRequest --- .../RequirementMachine/RequirementMachine.cpp | 10 ++++++---- lib/AST/RequirementMachine/RequirementMachine.h | 4 +++- .../RequirementMachineRequests.cpp | 17 ++++++++++++++--- lib/AST/RequirementMachine/RuleBuilder.cpp | 17 ++++++++++------- 
lib/AST/RequirementMachine/RuleBuilder.h | 5 ++++- 5 files changed, 37 insertions(+), 16 deletions(-) diff --git a/lib/AST/RequirementMachine/RequirementMachine.cpp b/lib/AST/RequirementMachine/RequirementMachine.cpp index 672a90d453de2..e1c99e5d7966a 100644 --- a/lib/AST/RequirementMachine/RequirementMachine.cpp +++ b/lib/AST/RequirementMachine/RequirementMachine.cpp @@ -167,22 +167,24 @@ RequirementMachine::initWithGenericSignature(CanGenericSignature sig) { /// Returns failure if completion fails within the configured number of steps. std::pair RequirementMachine::initWithProtocolWrittenRequirements( - ArrayRef protos) { + ArrayRef component, + const llvm::DenseMap> protos) { FrontendStatsTracer tracer(Stats, "build-rewrite-system"); if (Dump) { llvm::dbgs() << "Adding protocols"; - for (auto *proto : protos) { + for (auto *proto : component) { llvm::dbgs() << " " << proto->getName(); } llvm::dbgs() << " {\n"; } RuleBuilder builder(Context, System.getReferencedProtocols()); - builder.initWithProtocolWrittenRequirements(protos); + builder.initWithProtocolWrittenRequirements(component, protos); // Add the initial set of rewrite rules to the rewrite system. 
- System.initialize(/*recordLoops=*/true, protos, + System.initialize(/*recordLoops=*/true, component, std::move(builder.WrittenRequirements), std::move(builder.ImportedRules), std::move(builder.PermanentRules), diff --git a/lib/AST/RequirementMachine/RequirementMachine.h b/lib/AST/RequirementMachine/RequirementMachine.h index a224eaed7ef7b..e10cf0c7fe33b 100644 --- a/lib/AST/RequirementMachine/RequirementMachine.h +++ b/lib/AST/RequirementMachine/RequirementMachine.h @@ -99,7 +99,9 @@ class RequirementMachine final { std::pair initWithProtocolWrittenRequirements( - ArrayRef protos); + ArrayRef component, + const llvm::DenseMap> protos); std::pair initWithWrittenRequirements( diff --git a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index dde9c4c44737f..61bc71ab55b94 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -185,13 +185,23 @@ RequirementSignatureRequestRQM::evaluate(Evaluator &evaluator, // component at the same time. auto component = ctx.getRewriteContext().getProtocolComponent(proto); + // Collect user-written requirements from the protocols in this connected + // component. + llvm::DenseMap> protos; + for (const auto *proto : component) { + auto &requirements = protos[proto]; + for (auto req : proto->getStructuralRequirements()) + requirements.push_back(req); + for (auto req : proto->getTypeAliasRequirements()) + requirements.push_back({req, SourceLoc(), /*inferred=*/false}); + } + // Heap-allocate the requirement machine to save stack space. 
std::unique_ptr machine(new RequirementMachine( ctx.getRewriteContext())); - SmallVector errors; - - auto status = machine->initWithProtocolWrittenRequirements(component); + auto status = machine->initWithProtocolWrittenRequirements(component, protos); if (status.first != CompletionResult::Success) { // All we can do at this point is diagnose and give each protocol an empty // requirement signature. @@ -260,6 +270,7 @@ RequirementSignatureRequestRQM::evaluate(Evaluator &evaluator, if (ctx.LangOpts.RequirementMachineProtocolSignatures == RequirementMachineMode::Enabled) { + SmallVector errors; machine->System.computeRedundantRequirementDiagnostics(errors); diagnoseRequirementErrors(ctx, errors, /*allowConcreteGenericParams=*/false); diff --git a/lib/AST/RequirementMachine/RuleBuilder.cpp b/lib/AST/RequirementMachine/RuleBuilder.cpp index 6c03267723ac9..68a9ee4e1bba4 100644 --- a/lib/AST/RequirementMachine/RuleBuilder.cpp +++ b/lib/AST/RequirementMachine/RuleBuilder.cpp @@ -123,28 +123,31 @@ void RuleBuilder::initWithProtocolSignatureRequirements( /// user-written requirements. Used when actually building requirement /// signatures. void RuleBuilder::initWithProtocolWrittenRequirements( - ArrayRef protos) { + ArrayRef component, + const llvm::DenseMap> protos) { assert(!Initialized); Initialized = 1; // Add all protocols to the referenced set, so that subsequent calls // to addReferencedProtocol() with one of these protocols don't add // them to the import list. 
- for (auto *proto : protos) { + for (const auto *proto : component) ReferencedProtocols.insert(proto); - } - for (auto *proto : protos) { + for (const auto *proto : component) { + auto found = protos.find(proto); + assert(found != protos.end()); + const auto &reqs = found->second; + if (Dump) { llvm::dbgs() << "protocol " << proto->getName() << " {\n"; } addPermanentProtocolRules(proto); - for (auto req : proto->getStructuralRequirements()) + for (auto req : reqs) addRequirement(req, proto); - for (auto req : proto->getTypeAliasRequirements()) - addRequirement(req.getCanonical(), proto); for (auto *otherProto : proto->getProtocolDependencies()) addReferencedProtocol(otherProto); diff --git a/lib/AST/RequirementMachine/RuleBuilder.h b/lib/AST/RequirementMachine/RuleBuilder.h index 97ac2bf672b07..f5d7efa5ae8d1 100644 --- a/lib/AST/RequirementMachine/RuleBuilder.h +++ b/lib/AST/RequirementMachine/RuleBuilder.h @@ -98,7 +98,10 @@ struct RuleBuilder { void initWithGenericSignatureRequirements(ArrayRef requirements); void initWithWrittenRequirements(ArrayRef requirements); void initWithProtocolSignatureRequirements(ArrayRef proto); - void initWithProtocolWrittenRequirements(ArrayRef proto); + void initWithProtocolWrittenRequirements( + ArrayRef component, + const llvm::DenseMap> protos); void initWithConditionalRequirements(ArrayRef requirements, ArrayRef substitutions); void addReferencedProtocol(const ProtocolDecl *proto); From ff40f109ca4f36b5fa761a04316f0184694272fc Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Mon, 21 Mar 2022 22:13:52 -0400 Subject: [PATCH 51/88] RequirementMachine: Allow RequirementMachine::isConcreteType() and ::getCanonicalTypeInContext() to be used with protocol connected components --- .../GenericSignatureQueries.cpp | 18 ++++++++++++++---- .../RequirementMachine/RequirementMachine.cpp | 4 ++++ .../RequirementMachine/RequirementMachine.h | 6 ++++-- .../RequirementMachineRequests.cpp | 2 -- 4 files changed, 22 insertions(+), 8 deletions(-) 
diff --git a/lib/AST/RequirementMachine/GenericSignatureQueries.cpp b/lib/AST/RequirementMachine/GenericSignatureQueries.cpp index 7b701d28eadd3..01ff2534870bf 100644 --- a/lib/AST/RequirementMachine/GenericSignatureQueries.cpp +++ b/lib/AST/RequirementMachine/GenericSignatureQueries.cpp @@ -156,9 +156,14 @@ getSuperclassBound(Type depType, return props->getSuperclassBound(genericParams, term, Map); } -bool RequirementMachine::isConcreteType(Type depType) const { +/// Unlike the other queries, we have occasion to call this on a requirement +/// machine for a protocol connected component as well as a top-level +/// generic signature, so plumb through the protocol to use for the root +/// `Self` generic parameter here. +bool RequirementMachine::isConcreteType(Type depType, + const ProtocolDecl *proto) const { auto term = Context.getMutableTermForType(depType->getCanonicalType(), - /*proto=*/nullptr); + proto); System.simplify(term); verify(term); @@ -169,11 +174,16 @@ bool RequirementMachine::isConcreteType(Type depType) const { return props->isConcreteType(); } +/// Unlike the other queries, we have occasion to call this on a requirement +/// machine for a protocol connected component as well as a top-level +/// generic signature, so plumb through the protocol to use for the root +/// `Self` generic parameter here. 
Type RequirementMachine:: getConcreteType(Type depType, - TypeArrayView genericParams) const { + TypeArrayView genericParams, + const ProtocolDecl *proto) const { auto term = Context.getMutableTermForType(depType->getCanonicalType(), - /*proto=*/nullptr); + proto); System.simplify(term); verify(term); diff --git a/lib/AST/RequirementMachine/RequirementMachine.cpp b/lib/AST/RequirementMachine/RequirementMachine.cpp index e1c99e5d7966a..ca75428d9054f 100644 --- a/lib/AST/RequirementMachine/RequirementMachine.cpp +++ b/lib/AST/RequirementMachine/RequirementMachine.cpp @@ -172,6 +172,10 @@ RequirementMachine::initWithProtocolWrittenRequirements( SmallVector> protos) { FrontendStatsTracer tracer(Stats, "build-rewrite-system"); + // For RequirementMachine::verify() when called by generic signature queries; + // We have a single valid generic parameter at depth 0, index 0. + Params.push_back(component[0]->getSelfInterfaceType()->getCanonicalType()); + if (Dump) { llvm::dbgs() << "Adding protocols"; for (auto *proto : component) { diff --git a/lib/AST/RequirementMachine/RequirementMachine.h b/lib/AST/RequirementMachine/RequirementMachine.h index e10cf0c7fe33b..78b0f7dfa81ff 100644 --- a/lib/AST/RequirementMachine/RequirementMachine.h +++ b/lib/AST/RequirementMachine/RequirementMachine.h @@ -142,9 +142,11 @@ class RequirementMachine final { GenericSignature::RequiredProtocols getRequiredProtocols(Type depType) const; Type getSuperclassBound(Type depType, TypeArrayView genericParams) const; - bool isConcreteType(Type depType) const; + bool isConcreteType(Type depType, + const ProtocolDecl *proto=nullptr) const; Type getConcreteType(Type depType, - TypeArrayView genericParams) const; + TypeArrayView genericParams, + const ProtocolDecl *proto=nullptr) const; bool areSameTypeParameterInContext(Type depType1, Type depType2) const; bool isCanonicalTypeInContext(Type type) const; Type getCanonicalTypeInContext(Type type, diff --git 
a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index 61bc71ab55b94..20dd34a4bec7d 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -119,8 +119,6 @@ RequirementMachine::computeMinimalProtocolRequirements() { assert(protos.size() > 0 && "Not a protocol connected component rewrite system"); - assert(Params.empty() && - "Not a protocol connected component rewrite system"); System.minimizeRewriteSystem(); From 441fa1679a1b2643a4d80fdeff0ec50b7b1e98d2 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 13:10:04 -0400 Subject: [PATCH 52/88] RequirementMachine: Splitting concrete equivalence classes in protocol requirement signatures --- .../RequirementMachineRequests.cpp | 193 +++++++++++------- ...ncrete_equivalence_class_in_protocol.swift | 51 +++++ ...ntial_member_accesses_self_assoctype.swift | 4 +- ...associated_type_inference_fixed_type.swift | 8 +- 4 files changed, 178 insertions(+), 78 deletions(-) create mode 100644 test/Generics/split_concrete_equivalence_class_in_protocol.swift diff --git a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp index 20dd34a4bec7d..b19d5c6184d30 100644 --- a/lib/AST/RequirementMachine/RequirementMachineRequests.cpp +++ b/lib/AST/RequirementMachine/RequirementMachineRequests.cpp @@ -42,10 +42,11 @@ using namespace rewriting; /// concrete type requirements. 
static bool shouldSplitConcreteEquivalenceClass( Requirement req, + const ProtocolDecl *proto, const RequirementMachine *machine) { return (req.getKind() == RequirementKind::SameType && req.getSecondType()->isTypeParameter() && - machine->isConcreteType(req.getSecondType())); + machine->isConcreteType(req.getSecondType(), proto)); } /// Returns true if this generic signature contains abstract same-type @@ -54,9 +55,24 @@ static bool shouldSplitConcreteEquivalenceClass( /// requirements, and minimize the signature again. static bool shouldSplitConcreteEquivalenceClasses( ArrayRef requirements, + const ProtocolDecl *proto, const RequirementMachine *machine) { for (auto req : requirements) { - if (shouldSplitConcreteEquivalenceClass(req, machine)) + if (shouldSplitConcreteEquivalenceClass(req, proto, machine)) + return true; + } + + return false; +} + +/// Same as the above, but with the requirements of a protocol connected +/// component. +static bool shouldSplitConcreteEquivalenceClasses( + const llvm::DenseMap &protos, + const RequirementMachine *machine) { + for (const auto &pair : protos) { + if (shouldSplitConcreteEquivalenceClasses(pair.second.getRequirements(), + pair.first, machine)) return true; } @@ -71,6 +87,7 @@ static bool shouldSplitConcreteEquivalenceClasses( static void splitConcreteEquivalenceClasses( ASTContext &ctx, ArrayRef requirements, + const ProtocolDecl *proto, const RequirementMachine *machine, TypeArrayView genericParams, SmallVectorImpl &splitRequirements, @@ -78,7 +95,6 @@ static void splitConcreteEquivalenceClasses( unsigned maxAttempts = ctx.LangOpts.RequirementMachineMaxSplitConcreteEquivClassAttempts; - ++attempt; if (attempt >= maxAttempts) { llvm::errs() << "Splitting concrete equivalence classes did not " << "reach fixed point after " << attempt << " attempts.\n"; @@ -94,9 +110,9 @@ static void splitConcreteEquivalenceClasses( splitRequirements.clear(); for (auto req : requirements) { - if (shouldSplitConcreteEquivalenceClass(req, 
machine)) { + if (shouldSplitConcreteEquivalenceClass(req, proto, machine)) { auto concreteType = machine->getConcreteType( - req.getSecondType(), genericParams); + req.getSecondType(), genericParams, proto); Requirement firstReq(RequirementKind::SameType, req.getFirstType(), concreteType); @@ -111,6 +127,25 @@ static void splitConcreteEquivalenceClasses( } } +/// Same as the above, but with the requirements of a protocol connected +/// component. +static void splitConcreteEquivalenceClasses( + ASTContext &ctx, + const llvm::DenseMap &protos, + const RequirementMachine *machine, + llvm::DenseMap> &splitProtos, + unsigned &attempt) { + for (const auto &pair : protos) { + const auto *proto = pair.first; + auto genericParams = proto->getGenericSignature().getGenericParams(); + splitConcreteEquivalenceClasses(ctx, pair.second.getRequirements(), + proto, machine, genericParams, + splitProtos[proto], + attempt); + } +} + /// Builds the requirement signatures for each protocol in this strongly /// connected component. llvm::DenseMap @@ -195,87 +230,99 @@ RequirementSignatureRequestRQM::evaluate(Evaluator &evaluator, requirements.push_back({req, SourceLoc(), /*inferred=*/false}); } - // Heap-allocate the requirement machine to save stack space. - std::unique_ptr machine(new RequirementMachine( - ctx.getRewriteContext())); + unsigned attempt = 0; + for (;;) { + // Heap-allocate the requirement machine to save stack space. + std::unique_ptr machine(new RequirementMachine( + ctx.getRewriteContext())); - auto status = machine->initWithProtocolWrittenRequirements(component, protos); - if (status.first != CompletionResult::Success) { - // All we can do at this point is diagnose and give each protocol an empty - // requirement signature. 
- for (const auto *otherProto : component) { - ctx.Diags.diagnose(otherProto->getLoc(), - diag::requirement_machine_completion_failed, - /*protocol=*/1, - unsigned(status.first)); + auto status = machine->initWithProtocolWrittenRequirements(component, protos); + if (status.first != CompletionResult::Success) { + // All we can do at this point is diagnose and give each protocol an empty + // requirement signature. + for (const auto *otherProto : component) { + ctx.Diags.diagnose(otherProto->getLoc(), + diag::requirement_machine_completion_failed, + /*protocol=*/1, + unsigned(status.first)); + + auto rule = machine->getRuleAsStringForDiagnostics(status.second); + ctx.Diags.diagnose(otherProto->getLoc(), + diag::requirement_machine_completion_rule, + rule); + + if (otherProto != proto) { + ctx.evaluator.cacheOutput( + RequirementSignatureRequestRQM{const_cast(otherProto)}, + RequirementSignature(GenericSignatureErrorFlags::CompletionFailed)); + } + } - auto rule = machine->getRuleAsStringForDiagnostics(status.second); - ctx.Diags.diagnose(otherProto->getLoc(), - diag::requirement_machine_completion_rule, - rule); + return RequirementSignature(GenericSignatureErrorFlags::CompletionFailed); + } - if (otherProto != proto) { - ctx.evaluator.cacheOutput( - RequirementSignatureRequestRQM{const_cast(otherProto)}, - RequirementSignature(GenericSignatureErrorFlags::CompletionFailed)); + auto minimalRequirements = machine->computeMinimalProtocolRequirements(); + + if (!machine->getErrors()) { + if (shouldSplitConcreteEquivalenceClasses(minimalRequirements, machine.get())) { + ++attempt; + splitConcreteEquivalenceClasses(ctx, minimalRequirements, + machine.get(), protos, attempt); + continue; } } - return RequirementSignature(GenericSignatureErrorFlags::CompletionFailed); - } - - auto minimalRequirements = machine->computeMinimalProtocolRequirements(); + bool debug = machine->getDebugOptions().contains(DebugFlags::Minimization); - bool debug = 
machine->getDebugOptions().contains(DebugFlags::Minimization); + // The requirement signature for the actual protocol that the result + // was kicked off with. + Optional result; - // The requirement signature for the actual protocol that the result - // was kicked off with. - Optional result; + if (debug) { + llvm::dbgs() << "\nRequirement signatures:\n"; + } - if (debug) { - llvm::dbgs() << "\nRequirement signatures:\n"; - } + for (const auto &pair : minimalRequirements) { + auto *otherProto = pair.first; + const auto &reqs = pair.second; - for (const auto &pair : minimalRequirements) { - auto *otherProto = pair.first; - const auto &reqs = pair.second; + // Dump the result if requested. + if (debug) { + llvm::dbgs() << "- Protocol " << otherProto->getName() << ": "; - // Dump the result if requested. - if (debug) { - llvm::dbgs() << "- Protocol " << otherProto->getName() << ": "; + auto sig = GenericSignature::get( + otherProto->getGenericSignature().getGenericParams(), + reqs.getRequirements()); - auto sig = GenericSignature::get( - otherProto->getGenericSignature().getGenericParams(), - reqs.getRequirements()); + PrintOptions opts; + opts.ProtocolQualifiedDependentMemberTypes = true; + sig.print(llvm::dbgs(), opts); + llvm::dbgs() << "\n"; + } - PrintOptions opts; - opts.ProtocolQualifiedDependentMemberTypes = true; - sig.print(llvm::dbgs(), opts); - llvm::dbgs() << "\n"; + // Don't call setRequirementSignature() on the original proto; the + // request evaluator will do it for us. + if (otherProto == proto) + result = reqs; + else { + auto temp = reqs; + ctx.evaluator.cacheOutput( + RequirementSignatureRequestRQM{const_cast(otherProto)}, + std::move(temp)); + } } - // Don't call setRequirementSignature() on the original proto; the - // request evaluator will do it for us. 
- if (otherProto == proto) - result = reqs; - else { - auto temp = reqs; - ctx.evaluator.cacheOutput( - RequirementSignatureRequestRQM{const_cast(otherProto)}, - std::move(temp)); + if (ctx.LangOpts.RequirementMachineProtocolSignatures == + RequirementMachineMode::Enabled) { + SmallVector errors; + machine->System.computeRedundantRequirementDiagnostics(errors); + diagnoseRequirementErrors(ctx, errors, + /*allowConcreteGenericParams=*/false); } - } - if (ctx.LangOpts.RequirementMachineProtocolSignatures == - RequirementMachineMode::Enabled) { - SmallVector errors; - machine->System.computeRedundantRequirementDiagnostics(errors); - diagnoseRequirementErrors(ctx, errors, - /*allowConcreteGenericParams=*/false); + // Return the result for the specific protocol this request was kicked off on. + return *result; } - - // Return the result for the specific protocol this request was kicked off on. - return *result; } /// Builds the top-level generic signature requirements for this rewrite system. @@ -479,9 +526,12 @@ AbstractGenericSignatureRequestRQM::evaluate( if (!errorFlags) { if (shouldSplitConcreteEquivalenceClasses(result.getRequirements(), + /*proto=*/nullptr, machine.get())) { + ++attempt; splitConcreteEquivalenceClasses(ctx, result.getRequirements(), - machine.get(), result.getGenericParams(), + /*proto=*/nullptr, machine.get(), + result.getGenericParams(), requirements, attempt); continue; } @@ -646,9 +696,12 @@ InferredGenericSignatureRequestRQM::evaluate( if (!errorFlags) { // Check if we need to rebuild the signature. 
if (shouldSplitConcreteEquivalenceClasses(result.getRequirements(), + /*proto=*/nullptr, machine.get())) { + ++attempt; splitConcreteEquivalenceClasses(ctx, result.getRequirements(), - machine.get(), result.getGenericParams(), + /*proto=*/nullptr, machine.get(), + result.getGenericParams(), requirements, attempt); continue; } diff --git a/test/Generics/split_concrete_equivalence_class_in_protocol.swift b/test/Generics/split_concrete_equivalence_class_in_protocol.swift new file mode 100644 index 0000000000000..c76b6d35161ee --- /dev/null +++ b/test/Generics/split_concrete_equivalence_class_in_protocol.swift @@ -0,0 +1,51 @@ +// RUN: not %target-swift-frontend -typecheck %s -debug-generic-signatures 2>&1 | %FileCheck %s + +// CHECK-LABEL: .P1@ +// CHECK-NEXT: Requirement signature: +protocol P1 { + associatedtype T : P1 + associatedtype U where U == Int + associatedtype V where V == T.U +} + +struct G {} + +// CHECK-LABEL: .P2@ +// CHECK-NEXT: Requirement signature: , Self.[P2]V == G> +protocol P2 { + associatedtype T : P2 + associatedtype U where U == G + associatedtype V where V == T.U + associatedtype X +} + +// CHECK-LABEL: .P3@ +// CHECK-NEXT: Requirement signature: , Self.[P3]V == G, Self.[P3]X == Self.[P3]T.[P3]X> +protocol P3 { + associatedtype T : P3 + associatedtype U where U == G + associatedtype V where V == T.U, V == G + associatedtype X +} + +// CHECK-LABEL: .P4@ +// CHECK-NEXT: Requirement signature: , Self.[P4]V == G, Self.[P4]X == Self.[P4]T.[P4]X> +protocol P4 { + associatedtype T : P4 + associatedtype U where U == G + associatedtype V where V == T.U + associatedtype X where X == T.X +} + +// We don't split concrete equivalence classes if the signature had an error, +// but we also shouldn't crash in verify() because of an unsplit concrete +// equivalence class. 
+ +// CHECK-LABEL: .P4Bad@ +// CHECK-NEXT: Requirement signature: , Self.[P4Bad]V == Self.[P4Bad]T.[P4Bad]U> +protocol P4Bad { + associatedtype T : P4Bad + associatedtype U where U == G + associatedtype V where V == T.U + associatedtype X where X == U.X +} diff --git a/test/decl/protocol/existential_member_accesses_self_assoctype.swift b/test/decl/protocol/existential_member_accesses_self_assoctype.swift index ed1fd9d144085..3b0a70f1604b5 100644 --- a/test/decl/protocol/existential_member_accesses_self_assoctype.swift +++ b/test/decl/protocol/existential_member_accesses_self_assoctype.swift @@ -1,6 +1,4 @@ -// RUN: %target-typecheck-verify-swift -disable-availability-checking -requirement-machine-protocol-signatures=off -requirement-machine-abstract-signatures=on - -// TODO: Get this to pass with -requirement-machine-protocol-signatures=on. +// RUN: %target-typecheck-verify-swift -disable-availability-checking -requirement-machine-abstract-signatures=on //===----------------------------------------------------------------------===// // Use of protocols with Self or associated type requirements diff --git a/test/decl/protocol/req/associated_type_inference_fixed_type.swift b/test/decl/protocol/req/associated_type_inference_fixed_type.swift index 58125df977ce2..4568559f14229 100644 --- a/test/decl/protocol/req/associated_type_inference_fixed_type.swift +++ b/test/decl/protocol/req/associated_type_inference_fixed_type.swift @@ -1,7 +1,5 @@ -// RUN: %target-typecheck-verify-swift -requirement-machine-protocol-signatures=off -// RUN: not %target-swift-frontend -typecheck -dump-type-witness-systems %s -requirement-machine-protocol-signatures=off 2>&1 | %FileCheck %s - -// TODO: Get this to pass with -requirement-machine-protocol-signatures=on. 
+// RUN: %target-typecheck-verify-swift +// RUN: not %target-swift-frontend -typecheck -dump-type-witness-systems %s 2>&1 | %FileCheck %s protocol P1 where A == Never { associatedtype A @@ -572,7 +570,7 @@ protocol P32e where A == B { } protocol Q32: P32e, P32a, P32b, P32c, P32d {} // expected-error@-1 {{'Self.B' cannot be equal to both 'Never' and 'Int'}} -// expected-error@-2 {{'Self.B' cannot be equal to both 'Void' and 'Int'}} +// expected-error@-2 {{'Self.B' cannot be equal to both '()' and 'Int'}} // expected-error@-3 {{'Self.A' cannot be equal to both 'Bool' and 'Int'}} // expected-note@-4 3 {{same-type constraint 'Self.A' == 'Int' implied here}} do { From 527a519ebabb701cd6f34e374136bb83df20fd00 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 14:19:28 -0400 Subject: [PATCH 53/88] AST: TypeMatcher needs to recurse into OpaqueTypeArchetypeTypes Fixes https://bugs.swift.org/browse/SR-16040. --- include/swift/AST/TypeMatcher.h | 36 ++++++++++++++++++++++++++- test/Generics/sr16040.swift | 44 +++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 1 deletion(-) create mode 100644 test/Generics/sr16040.swift diff --git a/include/swift/AST/TypeMatcher.h b/include/swift/AST/TypeMatcher.h index 24c51e1f3f244..1ca80e9f30088 100644 --- a/include/swift/AST/TypeMatcher.h +++ b/include/swift/AST/TypeMatcher.h @@ -229,7 +229,41 @@ class TypeMatcher { } TRIVIAL_CASE(ModuleType) - TRIVIAL_CASE(ArchetypeType) + + bool visitArchetypeType(CanArchetypeType firstArchetype, + Type secondType, + Type sugaredFirstType) { + if (auto firstOpaqueArchetype = dyn_cast(firstArchetype)) { + if (auto secondOpaqueArchetype = secondType->getAs()) { + if (firstOpaqueArchetype->getDecl() == secondOpaqueArchetype->getDecl()) { + auto firstSubMap = firstOpaqueArchetype->getSubstitutions(); + auto secondSubMap = secondOpaqueArchetype->getSubstitutions(); + assert(firstSubMap.getReplacementTypes().size() == + secondSubMap.getReplacementTypes().size()); + + for 
(unsigned i : indices(firstSubMap.getReplacementTypes())) { + auto firstSubstType = firstSubMap.getReplacementTypes()[i]; + auto secondSubstType = secondSubMap.getReplacementTypes()[i]; + + if (!this->visit(firstSubstType->getCanonicalType(), + secondSubstType, firstSubstType)) + return false; + } + + return true; + } + } + } + + // FIXME: Once OpenedArchetypeType stores substitutions, do something + // similar to the above. + + if (firstArchetype->isEqual(secondType)) + return true; + + + return mismatch(firstArchetype.getPointer(), secondType, sugaredFirstType); + } bool visitDynamicSelfType(CanDynamicSelfType firstDynamicSelf, Type secondType, diff --git a/test/Generics/sr16040.swift b/test/Generics/sr16040.swift new file mode 100644 index 0000000000000..a27acd3094025 --- /dev/null +++ b/test/Generics/sr16040.swift @@ -0,0 +1,44 @@ +// RUN: %target-swift-frontend -typecheck %s -disable-availability-checking -debug-generic-signatures 2>&1 | %FileCheck %s + +public protocol View { + associatedtype Body : View + var body: Body { get } +} + +public struct Text : View { + public init(_: String) {} + public var body: Self { return self } +} + +public protocol DisplayableValue {} + +public protocol SingleValueDisplay: View { + associatedtype DisplayedValue + init (_ singleValue: DisplayedValue) + var displayedValue: DisplayedValue { get } +} + +// CHECK-LABEL: .RawDisplayableValue@ +// CHECK-NEXT: Requirement signature: +public protocol RawDisplayableValue: DisplayableValue { + associatedtype RawDisplay: SingleValueDisplay + where RawDisplay.DisplayedValue == Self +} + +// CHECK-LABEL: .RawTextDisplayableValue@ +// CHECK-NEXT: Requirement signature: > +public protocol RawTextDisplayableValue: RawDisplayableValue + where Self: CustomStringConvertible, + RawDisplay == RawTextDisplay { } + +public struct RawTextDisplay : SingleValueDisplay { + public var displayedValue: Value + + public init (_ singleValue: Value) { + self.displayedValue = singleValue + } + + public var 
body: some View { + Text(displayedValue.description) + } +} From 5acbeed5a4c1bfa41bc24c471744fc7c452ae9a7 Mon Sep 17 00:00:00 2001 From: Michael Gottesman Date: Sun, 13 Mar 2022 14:59:10 -0700 Subject: [PATCH 54/88] [debug-info-canonicalization] Add support for propagating debug info from alloc_stack. --- include/swift/SIL/DebugUtils.h | 33 +++++++++++++++ lib/LLVMPasses/DbgAddrBlockSplitter.cpp | 2 + .../Mandatory/DebugInfoCanonicalizer.cpp | 34 +++++++++------ .../move_function_dbginfo_async.swift | 41 +++++++++++++++++++ 4 files changed, 97 insertions(+), 13 deletions(-) diff --git a/include/swift/SIL/DebugUtils.h b/include/swift/SIL/DebugUtils.h index 661e35cd123c6..c6a566a5a4dd9 100644 --- a/include/swift/SIL/DebugUtils.h +++ b/include/swift/SIL/DebugUtils.h @@ -341,6 +341,39 @@ struct DebugVarCarryingInst { } } + /// Returns true if this DebugVarCarryingInst was moved. + bool getWasMoved() const { + switch (kind) { + case Kind::Invalid: + llvm_unreachable("Invalid?!"); + case Kind::DebugValue: + return cast(inst)->getWasMoved(); + case Kind::AllocStack: + return cast(inst)->getWasMoved(); + case Kind::AllocBox: + llvm_unreachable("Not implemented"); + } + } + + /// If we are attempting to create a "debug_value" clone of this debug var + /// carrying inst, return the appropriate SILValue to use as the operand of + /// that debug value. + /// + /// For a debug_value, we just return the actual operand, otherwise we return + /// the pointer address. + SILValue getOperandForDebugValueClone() const { + switch (kind) { + case Kind::Invalid: + llvm_unreachable("Invalid?!"); + case Kind::DebugValue: + return cast(inst)->getOperand(); + case Kind::AllocStack: + return cast(inst); + case Kind::AllocBox: + llvm_unreachable("Not implemented"); + } + } + /// If \p value is an alloc_stack, alloc_box use that. Otherwise, see if \p /// value has a single debug user, return that. Otherwise return the invalid /// DebugVarCarryingInst. 
diff --git a/lib/LLVMPasses/DbgAddrBlockSplitter.cpp b/lib/LLVMPasses/DbgAddrBlockSplitter.cpp index 50c64a3b5651c..97c9ac414301e 100644 --- a/lib/LLVMPasses/DbgAddrBlockSplitter.cpp +++ b/lib/LLVMPasses/DbgAddrBlockSplitter.cpp @@ -39,6 +39,8 @@ struct SwiftDbgAddrBlockSplitter : FunctionPass { bool SwiftDbgAddrBlockSplitter::runOnFunction(Function &fn) { SmallVector breakBlockPoints; + // If we are in the first block, + for (auto &block : fn) { for (auto &inst : block) { if (isa(&inst)) { diff --git a/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp b/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp index 8375fb7e7674e..1202ece4bdc59 100644 --- a/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp +++ b/lib/SILOptimizer/Mandatory/DebugInfoCanonicalizer.cpp @@ -50,6 +50,7 @@ #include "swift/SIL/ApplySite.h" #include "swift/SIL/BasicBlockBits.h" #include "swift/SIL/BasicBlockDatastructures.h" +#include "swift/SIL/DebugUtils.h" #include "swift/SIL/SILBuilder.h" #include "swift/SIL/SILInstruction.h" #include "swift/SIL/SILUndef.h" @@ -68,22 +69,22 @@ using namespace swift; // Utility //===----------------------------------------------------------------------===// -static SILInstruction *cloneDebugValue(DebugValueInst *original, +static SILInstruction *cloneDebugValue(DebugVarCarryingInst original, SILInstruction *insertPt) { SILBuilderWithScope builder(std::next(insertPt->getIterator())); builder.setCurrentDebugScope(original->getDebugScope()); - return builder.createDebugValue(original->getLoc(), original->getOperand(), - *original->getVarInfo(), false, - true /*was moved*/); + return builder.createDebugValue( + original->getLoc(), original.getOperandForDebugValueClone(), + *original.getVarInfo(), false, true /*was moved*/); } -static SILInstruction *cloneDebugValue(DebugValueInst *original, +static SILInstruction *cloneDebugValue(DebugVarCarryingInst original, SILBasicBlock *block) { SILBuilderWithScope builder(&block->front()); 
builder.setCurrentDebugScope(original->getDebugScope()); - return builder.createDebugValue(original->getLoc(), original->getOperand(), - *original->getVarInfo(), false, - true /*was moved*/); + return builder.createDebugValue( + original->getLoc(), original.getOperandForDebugValueClone(), + *original.getVarInfo(), false, true /*was moved*/); } //===----------------------------------------------------------------------===// @@ -93,7 +94,7 @@ static SILInstruction *cloneDebugValue(DebugValueInst *original, namespace { struct BlockState { - llvm::SmallMapVector debugValues; + llvm::SmallMapVector debugValues; }; struct DebugInfoCanonicalizer { @@ -202,13 +203,20 @@ bool DebugInfoCanonicalizer::process() { // Then for each inst in the block... for (auto &inst : *block) { LLVM_DEBUG(llvm::dbgs() << " Inst: " << inst); - // If we have a debug_value that was moved, store state for it. - if (auto *dvi = dyn_cast(&inst)) { - if (!dvi->getWasMoved()) + + // Skip any alloc box inst we see, we do not support them yet. + if (isa(&inst)) + continue; + + // If we have a debug_value or alloc_stack that was moved, store state for + // it. Once the isa check above is removed, this will handle alloc_box as + // well. + if (auto dvi = DebugVarCarryingInst(&inst)) { + if (!dvi.getWasMoved()) continue; LLVM_DEBUG(llvm::dbgs() << " Found DebugValueInst!\n"); - auto debugInfo = dvi->getVarInfo(); + auto debugInfo = dvi.getVarInfo(); if (!debugInfo) { LLVM_DEBUG(llvm::dbgs() << " Has no var info?! 
Skipping!\n"); continue; diff --git a/test/DebugInfo/move_function_dbginfo_async.swift b/test/DebugInfo/move_function_dbginfo_async.swift index 52b5fb0ad5244..8c81d03730129 100644 --- a/test/DebugInfo/move_function_dbginfo_async.swift +++ b/test/DebugInfo/move_function_dbginfo_async.swift @@ -165,6 +165,7 @@ public func letSimpleTest(_ msg: __owned T) async { // DWARF: [0x{{[a-f0-9]+}}, 0x{{[a-f0-9]+}}): DW_OP_breg6 RBP-88, DW_OP_deref, DW_OP_plus_uconst 0x10, DW_OP_plus_uconst 0x8, DW_OP_deref) // DWARF: DW_AT_name ("msg") +// Change name to varSimpleTestArg public func varSimpleTest(_ msg: inout T, _ msg2: T) async { await forceSplit() use(_move(msg)) @@ -175,3 +176,43 @@ public func varSimpleTest(_ msg: inout T, _ msg2: T) async { msg = msg2 await forceSplit() } + +// We don't have an argument here, so we shouldn't have an llvm.dbg.addr in the +// initial function. +// +// CHECK-LABEL: define swifttailcc void @"$s27move_function_dbginfo_async16varSimpleTestVaryyYaF"(%swift.context* swiftasync %0) +// CHECK-NOT: llvm.dbg.addr +// +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async16varSimpleTestVaryyYaFTY0_"(i8* swiftasync %0) +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8)) +// +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async16varSimpleTestVaryyYaFTQ1_"(i8* swiftasync %0) +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata !{{[0-9]+}}, metadata !DIExpression(DW_OP_deref, DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8)) + +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async16varSimpleTestVaryyYaFTY2_"(i8* swiftasync %0) +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[METADATA:[0-9]+]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8)), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.value(metadata 
%T27move_function_dbginfo_async5KlassC** undef, metadata ![[METADATA]], metadata !DIExpression()), !dbg ![[ADDR_LOC]] + +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async16varSimpleTestVaryyYaFTQ3_"(i8* swiftasync %0) +// We should only see an llvm.dbg.value here. +// CHECK-NOT: llvm.dbg.addr +// CHECK: call void @llvm.dbg.value(metadata %T27move_function_dbginfo_async5KlassC** undef, +// CHECK-NOT: llvm.dbg.addr +// +// We should see first a llvm.dbg.value to undef the value until we reinit. Then +// we should see a llvm.dbg.addr to reinit. +// +// CHECK-LABEL: define internal swifttailcc void @"$s27move_function_dbginfo_async16varSimpleTestVaryyYaFTY4_"(i8* swiftasync %0) +// CHECK: call void @llvm.dbg.value(metadata %T27move_function_dbginfo_async5KlassC** undef, metadata ![[METADATA:[0-9]+]], metadata !DIExpression()), !dbg ![[ADDR_LOC:[0-9]+]] +// CHECK: call void @llvm.dbg.addr(metadata i8* %0, metadata ![[METADATA]], metadata !DIExpression(DW_OP_plus_uconst, 16, DW_OP_plus_uconst, 8)), !dbg ![[ADDR_LOC]] +public func varSimpleTestVar() async { + var k = Klass() + k.doSomething() + await forceSplit() + let m = _move(k) + m.doSomething() + await forceSplit() + k = Klass() + k.doSomething() + print("stop here") +} From 37da96092ce3d07df85c5b035119bd4e8d9e99d9 Mon Sep 17 00:00:00 2001 From: Pavel Yaskevich Date: Tue, 22 Mar 2022 13:16:14 -0700 Subject: [PATCH 55/88] [CSGen] Don't expect implicit casts to have type reprs Implicit casts are allowed to be constructed with a type, instead of a type repr. Constraint generation should honor that, and fallback to using cast type when repr is was not given. 
--- lib/Sema/CSGen.cpp | 47 ++++++----- unittests/Sema/CMakeLists.txt | 1 + unittests/Sema/ConstraintGenerationTests.cpp | 88 ++++++++++++++++++++ 3 files changed, 115 insertions(+), 21 deletions(-) create mode 100644 unittests/Sema/ConstraintGenerationTests.cpp diff --git a/lib/Sema/CSGen.cpp b/lib/Sema/CSGen.cpp index 30d77332f58f2..fcfeb0ea1666a 100644 --- a/lib/Sema/CSGen.cpp +++ b/lib/Sema/CSGen.cpp @@ -2969,18 +2969,27 @@ namespace { return typeVar; } + Type getTypeForCast(ExplicitCastExpr *E) { + if (auto *const repr = E->getCastTypeRepr()) { + // Validate the resulting type. + return resolveTypeReferenceInExpression( + repr, TypeResolverContext::ExplicitCastExpr, + CS.getConstraintLocator(E)); + } + assert(E->isImplicit()); + return E->getCastType(); + } + Type visitForcedCheckedCastExpr(ForcedCheckedCastExpr *expr) { auto fromExpr = expr->getSubExpr(); if (!fromExpr) // Either wasn't constructed correctly or wasn't folded. return nullptr; - auto *const repr = expr->getCastTypeRepr(); - // Validate the resulting type. - const auto toType = resolveTypeReferenceInExpression( - repr, TypeResolverContext::ExplicitCastExpr, - CS.getConstraintLocator(expr)); + auto toType = getTypeForCast(expr); if (!toType) - return nullptr; + return Type(); + + auto *const repr = expr->getCastTypeRepr(); // Cache the type we're casting to. if (repr) CS.setType(repr, toType); @@ -3001,13 +3010,12 @@ namespace { Type visitCoerceExpr(CoerceExpr *expr) { // Validate the resulting type. - auto *const repr = expr->getCastTypeRepr(); - const auto toType = resolveTypeReferenceInExpression( - repr, TypeResolverContext::ExplicitCastExpr, - CS.getConstraintLocator(expr)); + auto toType = getTypeForCast(expr); if (!toType) return nullptr; + auto *const repr = expr->getCastTypeRepr(); + // Cache the type we're casting to. if (repr) CS.setType(repr, toType); @@ -3033,13 +3041,12 @@ namespace { return nullptr; // Validate the resulting type. 
- auto *const repr = expr->getCastTypeRepr(); - const auto toType = resolveTypeReferenceInExpression( - repr, TypeResolverContext::ExplicitCastExpr, - CS.getConstraintLocator(expr)); + const auto toType = getTypeForCast(expr); if (!toType) return nullptr; + auto *const repr = expr->getCastTypeRepr(); + // Cache the type we're casting to. if (repr) CS.setType(repr, toType); @@ -3058,17 +3065,14 @@ namespace { } Type visitIsExpr(IsExpr *expr) { - // Validate the type. - // FIXME: Locator for the cast type? - auto &ctx = CS.getASTContext(); - const auto toType = resolveTypeReferenceInExpression( - expr->getCastTypeRepr(), TypeResolverContext::ExplicitCastExpr, - CS.getConstraintLocator(expr)); + auto toType = getTypeForCast(expr); if (!toType) return nullptr; + auto *const repr = expr->getCastTypeRepr(); // Cache the type we're checking. - CS.setType(expr->getCastTypeRepr(), toType); + if (repr) + CS.setType(repr, toType); // Add a checked cast constraint. auto fromType = CS.getType(expr->getSubExpr()); @@ -3076,6 +3080,7 @@ namespace { CS.addConstraint(ConstraintKind::CheckedCast, fromType, toType, CS.getConstraintLocator(expr)); + auto &ctx = CS.getASTContext(); // The result is Bool. 
auto boolDecl = ctx.getBoolDecl(); diff --git a/unittests/Sema/CMakeLists.txt b/unittests/Sema/CMakeLists.txt index a70e70ab56923..e93687d2621bc 100644 --- a/unittests/Sema/CMakeLists.txt +++ b/unittests/Sema/CMakeLists.txt @@ -2,6 +2,7 @@ add_swift_unittest(swiftSemaTests SemaFixture.cpp BindingInferenceTests.cpp + ConstraintGenerationTests.cpp ConstraintSimplificationTests.cpp UnresolvedMemberLookupTests.cpp PlaceholderTypeInferenceTests.cpp diff --git a/unittests/Sema/ConstraintGenerationTests.cpp b/unittests/Sema/ConstraintGenerationTests.cpp new file mode 100644 index 0000000000000..11a84745da2df --- /dev/null +++ b/unittests/Sema/ConstraintGenerationTests.cpp @@ -0,0 +1,88 @@ +//===--- ConstraintGenerationTests.cpp ------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include "SemaFixture.h" +#include "swift/AST/Expr.h" + +using namespace swift; +using namespace swift::unittest; +using namespace swift::constraints; + +TEST_F(SemaTest, TestImplicitForceCastConstraintGeneration) { + ConstraintSystem cs(DC, ConstraintSystemOptions()); + + auto *literal = IntegerLiteralExpr::createFromUnsigned(Context, 42); + + auto *cast = ForcedCheckedCastExpr::createImplicit(Context, literal, + getStdlibType("Double")); + + auto *expr = cs.generateConstraints(cast, DC, /*isInputExpression=*/true); + + ASSERT_NE(expr, nullptr); + + SmallVector solutions; + cs.solve(solutions); + + ASSERT_EQ(solutions.size(), (unsigned)1); + + auto &solution = solutions.front(); + + ASSERT_TRUE(solution.getResolvedType(literal)->isEqual(getStdlibType("Int"))); + 
ASSERT_TRUE(solution.getResolvedType(cast)->isEqual(getStdlibType("Double"))); +} + +TEST_F(SemaTest, TestImplicitCoercionConstraintGeneration) { + ConstraintSystem cs(DC, ConstraintSystemOptions()); + + auto *literal = IntegerLiteralExpr::createFromUnsigned(Context, 42); + + auto *cast = CoerceExpr::createImplicit(Context, literal, + getStdlibType("Double")); + + auto *expr = cs.generateConstraints(cast, DC, /*isInputExpression=*/true); + + ASSERT_NE(expr, nullptr); + + SmallVector solutions; + cs.solve(solutions); + + ASSERT_EQ(solutions.size(), (unsigned)1); + + auto &solution = solutions.front(); + + ASSERT_TRUE(solution.getResolvedType(literal)->isEqual(getStdlibType("Double"))); + ASSERT_TRUE(solution.getResolvedType(cast)->isEqual(getStdlibType("Double"))); +} + +TEST_F(SemaTest, TestImplicitConditionalCastConstraintGeneration) { + ConstraintSystem cs(DC, ConstraintSystemOptions()); + + auto *literal = IntegerLiteralExpr::createFromUnsigned(Context, 42); + + auto *cast = ConditionalCheckedCastExpr::createImplicit( + Context, literal, getStdlibType("Double")); + + auto *expr = cs.generateConstraints(cast, DC, /*isInputExpression=*/true); + + ASSERT_NE(expr, nullptr); + + SmallVector solutions; + cs.solve(solutions); + + ASSERT_EQ(solutions.size(), (unsigned)1); + + auto &solution = solutions.front(); + + ASSERT_TRUE(solution.getResolvedType(literal)->isEqual(getStdlibType("Int"))); + ASSERT_TRUE(solution.getResolvedType(cast)->isEqual( + OptionalType::get(getStdlibType("Double")))); +} From 07fc64edcb45a4cd3487af206c5e87ea41e804a1 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 15:42:04 -0400 Subject: [PATCH 56/88] AST: Fix a few more places where TypeMatcher would lose sugar --- include/swift/AST/TypeMatcher.h | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/include/swift/AST/TypeMatcher.h b/include/swift/AST/TypeMatcher.h index 1ca80e9f30088..f1eee3911e604 100644 --- a/include/swift/AST/TypeMatcher.h +++ 
b/include/swift/AST/TypeMatcher.h @@ -181,9 +181,8 @@ class TypeMatcher { bool visitReferenceStorageType(CanReferenceStorageType firstStorage, Type secondType, Type sugaredFirstType) { - auto _secondStorage = secondType->getCanonicalType(); - if (firstStorage->getKind() == _secondStorage->getKind()) { - auto secondStorage = cast(_secondStorage); + if (firstStorage->getKind() == secondType->getDesugaredType()->getKind()) { + auto secondStorage = secondType->castTo(); return this->visit(firstStorage.getReferentType(), secondStorage->getReferentType(), sugaredFirstType->castTo() @@ -195,9 +194,8 @@ class TypeMatcher { bool visitNominalType(CanNominalType firstNominal, Type secondType, Type sugaredFirstType) { - auto _secondNominal = secondType->getCanonicalType(); - if (firstNominal->getKind() == _secondNominal->getKind()) { - auto secondNominal = cast(_secondNominal); + if (firstNominal->getKind() == secondType->getDesugaredType()->getKind()) { + auto secondNominal = secondType->castTo(); if (firstNominal->getDecl() != secondNominal->getDecl()) return mismatch(firstNominal.getPointer(), secondNominal, sugaredFirstType); @@ -216,9 +214,8 @@ class TypeMatcher { bool visitAnyMetatypeType(CanAnyMetatypeType firstMeta, Type secondType, Type sugaredFirstType) { - auto _secondMeta = secondType->getCanonicalType(); - if (firstMeta->getKind() == _secondMeta->getKind()) { - auto secondMeta = cast(_secondMeta); + if (firstMeta->getKind() == secondType->getDesugaredType()->getKind()) { + auto secondMeta = secondType->castTo(); return this->visit(firstMeta.getInstanceType(), secondMeta->getInstanceType(), sugaredFirstType->castTo() From 02e402721d17827cd6a31dbefe34de2fa9e2d297 Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 14:24:08 -0400 Subject: [PATCH 57/88] RequirementMachine: Print opaque archetypes using the 'stable' representation --- lib/AST/RequirementMachine/Symbol.cpp | 2 ++ test/Generics/opaque_archetype_concrete_requirement.swift | 2 +- 2 files 
changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/AST/RequirementMachine/Symbol.cpp b/lib/AST/RequirementMachine/Symbol.cpp index 448fec061a793..7ae6a125d6194 100644 --- a/lib/AST/RequirementMachine/Symbol.cpp +++ b/lib/AST/RequirementMachine/Symbol.cpp @@ -658,6 +658,8 @@ void Symbol::dump(llvm::raw_ostream &out) const { PrintOptions opts; opts.AlternativeTypeNames = &substitutionNames; + opts.OpaqueReturnTypePrinting = + PrintOptions::OpaqueReturnTypePrintingMode::StableReference; switch (getKind()) { case Kind::Name: diff --git a/test/Generics/opaque_archetype_concrete_requirement.swift b/test/Generics/opaque_archetype_concrete_requirement.swift index db28a679a52e3..c9472dce3ae62 100644 --- a/test/Generics/opaque_archetype_concrete_requirement.swift +++ b/test/Generics/opaque_archetype_concrete_requirement.swift @@ -77,5 +77,5 @@ protocol HasRecursiveP { extension HasRecursiveP where T == DefinesRecursiveP.T {} // expected-error@-1 {{cannot build rewrite system for generic signature; rule length limit exceeded}} -// expected-note@-2 {{failed rewrite rule is τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[concrete: ((((((((((some RecursiveP).T).T).T).T).T).T).T).T).T).T] => τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T]}} +// expected-note@-2 {{failed rewrite rule is τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[concrete: (((((((((@_opaqueReturnTypeOf("$s37opaque_archetype_concrete_requirement17DefinesRecursivePV1tQrvp", 0) __.T).T).T).T).T).T).T).T).T).T] => 
τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T]}} From 00c33f8f74592d0fd9acae29069055b2cf3485b5 Mon Sep 17 00:00:00 2001 From: Dave Lee Date: Tue, 22 Mar 2022 14:23:12 -0700 Subject: [PATCH 58/88] [Demangling] Disable ShowAsyncResumePartial in SimplifiedUI (#41870) Change `SimplifiedUIDemangleOptions` to remove "partial function" prefixes when demangling async coroutine symbols. This removes the prefixes "await resume partial function" and "suspend resume partial function" from demangled names, in doing so hides the effect of async/coroutine function splitting from stack traces and other symbolication. This output will produce the source level function name. For example, a symbol that previously would have demangled to: ``` (1) await resume partial function for static Main.main() ``` will, with this change, demangle to: ``` static Main.main() ``` See https://github.com/apple/swift/pull/36978 where `ShowAsyncResumePartial` was introduced for lldb. 
rdar://90455541 --- include/swift/Demangling/Demangle.h | 1 + lib/SILOptimizer/Analysis/ClosureScope.cpp | 6 +++--- test/Demangle/Inputs/simplified-manglings.txt | 2 ++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/include/swift/Demangling/Demangle.h b/include/swift/Demangling/Demangle.h index 67543e8d84210..087e228a3e2e6 100644 --- a/include/swift/Demangling/Demangle.h +++ b/include/swift/Demangling/Demangle.h @@ -90,6 +90,7 @@ struct DemangleOptions { Opt.ShortenArchetype = true; Opt.ShowPrivateDiscriminators = false; Opt.ShowFunctionArgumentTypes = false; + Opt.ShowAsyncResumePartial = false; return Opt; }; }; diff --git a/lib/SILOptimizer/Analysis/ClosureScope.cpp b/lib/SILOptimizer/Analysis/ClosureScope.cpp index 1268982dfaaca..d491a9cd0c98b 100644 --- a/lib/SILOptimizer/Analysis/ClosureScope.cpp +++ b/lib/SILOptimizer/Analysis/ClosureScope.cpp @@ -215,9 +215,9 @@ void ClosureGraph::finalize() { #ifndef NDEBUG static void dumpFunctionName(SILFunction *function) { - llvm::dbgs() << Demangle::demangleSymbolAsString( - function->getName(), - Demangle::DemangleOptions::SimplifiedUIDemangleOptions()) + auto opts = Demangle::DemangleOptions::SimplifiedUIDemangleOptions(); + opts.ShowAsyncResumePartial = true; + llvm::dbgs() << Demangle::demangleSymbolAsString(function->getName(), opts) << " '" << function->getName() << "'\n"; } diff --git a/test/Demangle/Inputs/simplified-manglings.txt b/test/Demangle/Inputs/simplified-manglings.txt index a419a117135e8..2283c67cba42f 100644 --- a/test/Demangle/Inputs/simplified-manglings.txt +++ b/test/Demangle/Inputs/simplified-manglings.txt @@ -211,3 +211,5 @@ _TTSf0os___TFVs17_LegacyStringCore15_invariantCheckfT_T_ ---> specialized _Legac _TTSf2o___TTSf2s_d___TFVs17_LegacyStringCoreCfVs13_StringBufferS_ ---> specialized _LegacyStringCore.init(_:) _TTSf2do___TTSf2s_d___TFVs17_LegacyStringCoreCfVs13_StringBufferS_ ---> specialized _LegacyStringCore.init(_:) 
_TTSf2dos___TTSf2s_d___TFVs17_LegacyStringCoreCfVs13_StringBufferS_ ---> specialized _LegacyStringCore.init(_:) +_$s4main1fSiyYaFTQ0_ ---> f() +_$s4main1fSiyYaFTY0_ ---> f() From 9604304586182d1c09840f8875fb3c03515bc3f6 Mon Sep 17 00:00:00 2001 From: Kavon Farvardin Date: Mon, 21 Mar 2022 22:14:33 -0700 Subject: [PATCH 59/88] Downgrade more errors into warnings for actor inits. In the replacement of the escaping-use restriction with flow-isolation, I hadn't accounted for all of the situations where the isolation changes would break backwards compatibility with Swift 5.5 programs. The escaping-use restriction permitted a lot of very unsafe things with warnings that it would become an error in Swift 6. With the introduction of flow-isolation, it was a bit tricky to get the right warnings back in place, while not unnecessarily warning about property accesses that might actually be OK. There is a very careful coordination between the type-checker and the flow-isolation pass. While I had done these downgrades for deinits, I also needed to do them for inits as well, because member accesses to isolated methods within actor initializers were still getting rejected as an error. This patch should be pretty solid now.
fixes rdar://90595278 --- include/swift/AST/DiagnosticsSema.def | 6 +- lib/Sema/TypeCheckConcurrency.cpp | 170 ++++++++++++++++++++----- test/Concurrency/actor_isolation.swift | 56 +++++--- test/Concurrency/flow_isolation.swift | 20 ++- 4 files changed, 189 insertions(+), 63 deletions(-) diff --git a/include/swift/AST/DiagnosticsSema.def b/include/swift/AST/DiagnosticsSema.def index 8943815552462..8ca22dc9bc447 100644 --- a/include/swift/AST/DiagnosticsSema.def +++ b/include/swift/AST/DiagnosticsSema.def @@ -4517,17 +4517,13 @@ NOTE(note_distributed_actor_system_conformance_missing_adhoc_requirement,none, ERROR(override_implicit_unowned_executor,none, "cannot override an actor's 'unownedExecutor' property that wasn't " "explicitly defined", ()) -ERROR(actor_isolated_from_decl,none, - "actor-isolated %0 %1 can not be referenced from a non-isolated " - "%select{deinit|autoclosure|closure}2", - (DescriptiveDeclKind, DeclName, unsigned)) ERROR(actor_isolated_non_self_reference,none, "actor-isolated %0 %1 can not be " "%select{referenced|mutated|used 'inout'}2 " "%select{on a non-isolated actor instance|" "from a Sendable function|from a Sendable closure|" "from an 'async let' initializer|from global actor %4|" - "from the main actor|from a non-isolated context}3", + "from the main actor|from a non-isolated context|from a non-isolated autoclosure}3", (DescriptiveDeclKind, DeclName, unsigned, unsigned, Type)) ERROR(distributed_actor_isolated_non_self_reference,none, "distributed actor-isolated %0 %1 can not be accessed from a " diff --git a/lib/Sema/TypeCheckConcurrency.cpp b/lib/Sema/TypeCheckConcurrency.cpp index bd937e26ed51b..68da5282b1c2f 100644 --- a/lib/Sema/TypeCheckConcurrency.cpp +++ b/lib/Sema/TypeCheckConcurrency.cpp @@ -1272,6 +1272,10 @@ namespace { // It is within a nonisolated context. NonIsolatedContext, + // It is within a nonisolated autoclosure argument. 
This is primarily here + // to aid in giving specific diagnostics, because autoclosures are not + // always easy for programmers to notice. + NonIsolatedAutoclosure }; VarDecl * const actor; @@ -1351,7 +1355,7 @@ namespace { /// /// @returns None if the context expression is either an InOutExpr, /// not tracked, or if the decl is not a property or subscript - Optional kindOfUsage(ValueDecl *decl, Expr *use) const { + Optional kindOfUsage(ValueDecl const* decl, Expr *use) const { // we need a use for lookup. if (!use) return None; @@ -1751,6 +1755,16 @@ namespace { auto var = getReferencedParamOrCapture(expr); bool isPotentiallyIsolated = isPotentiallyIsolatedActor(var); + // helps aid in giving more informative diagnostics for autoclosure args. + auto specificNonIsoClosureKind = + [](DeclContext const* dc) -> ReferencedActor::Kind { + if (auto autoClos = dyn_cast(dc)) + if (autoClos->getThunkKind() == AutoClosureExpr::Kind::None) + return ReferencedActor::NonIsolatedAutoclosure; + + return ReferencedActor::NonIsolatedContext; + }; + // Walk the scopes between the variable reference and the variable // declaration to determine whether it is still isolated. auto dc = const_cast(getDeclContext()); @@ -1774,7 +1788,7 @@ namespace { return ReferencedActor(var, isPotentiallyIsolated, ReferencedActor::SendableClosure); } - return ReferencedActor(var, isPotentiallyIsolated, ReferencedActor::NonIsolatedContext); + return ReferencedActor(var, isPotentiallyIsolated, specificNonIsoClosureKind(dc)); case ClosureActorIsolation::ActorInstance: // If the closure is isolated to the same variable, we're all set. 
@@ -1786,7 +1800,7 @@ namespace { return ReferencedActor(var, isPotentiallyIsolated, ReferencedActor::Isolated); } - return ReferencedActor(var, isPotentiallyIsolated, ReferencedActor::NonIsolatedContext); + return ReferencedActor(var, isPotentiallyIsolated, specificNonIsoClosureKind(dc)); case ClosureActorIsolation::GlobalActor: return ReferencedActor::forGlobalActor( @@ -1939,7 +1953,7 @@ namespace { /// Note that the given actor member is isolated. /// @param context is allowed to be null if no context is appropriate. - void noteIsolatedActorMember(ValueDecl *decl, Expr *context) { + void noteIsolatedActorMember(ValueDecl const* decl, Expr *context) { // detect if it is a distributed actor, to provide better isolation notes auto nominal = decl->getDeclContext()->getSelfNominalTypeDecl(); @@ -2693,40 +2707,124 @@ namespace { return false; } - /// an ad-hoc check specific to member isolation checking. - static bool memberAccessWasAllowedInSwift5(DeclContext const *refCxt, - ValueDecl const *member, - SourceLoc memberLoc) { + /// Based on the former escaping-use restriction, which was replaced by + /// flow-isolation. We need this to support backwards compatability in the + /// type-checker for programs prior to Swift 6. + /// \param fn either a constructor or destructor of an actor. + static bool wasLegacyEscapingUseRestriction(AbstractFunctionDecl *fn) { + assert(fn->getDeclContext()->getSelfClassDecl()->isAnyActor()); + assert(isa(fn) || isa(fn)); + + // according to today's isolation, determine whether it use to have the + // escaping-use restriction + switch (getActorIsolation(fn).getKind()) { + case ActorIsolation::Independent: + case ActorIsolation::GlobalActor: + case ActorIsolation::GlobalActorUnsafe: + // convenience inits did not have the restriction. + if (auto *ctor = dyn_cast(fn)) + if (ctor->isConvenienceInit()) + return false; + + break; // goto basic case + + case ActorIsolation::ActorInstance: + // none of these had the restriction affect them. 
+ assert(fn->hasAsync()); + return false; + + case ActorIsolation::Unspecified: + // this is basically just objc-marked inits. + break; + }; + + return !(fn->hasAsync()); // basic case: not async = had restriction. + } + + /// An ad-hoc check specific to member isolation checking. assumed to be + /// queried when a self-member is being accessed in a context which is not + /// isolated to self. The "special permission" is part of a backwards + /// compatability with actor inits and deinits that maintains the + /// permissive nature of the escaping-use restriction, which was only + /// staged in as a warning. See implementation for more details. + /// + /// \returns true if this access in the given context should be allowed + /// in Sema, with the side-effect of emitting a warning as needed. + /// If false is returned, then the "special permission" was not granted. + bool memberAccessHasSpecialPermissionInSwift5(DeclContext const *refCxt, + ReferencedActor &baseActor, + ValueDecl const *member, + SourceLoc memberLoc, + Expr *exprCxt) { // no need for this in Swift 6+ if (refCxt->getASTContext().isSwiftVersionAtLeast(6)) return false; - // In Swift 5, we were allowing all members to be referenced from a - // deinit, nested within a wide variety of contexts. + // must be an access to an instance member. + if (!member->isInstanceMember()) + return false; + + // In the history of actor initializers prior to Swift 6, self-isolated + // members could be referenced from any init or deinit, even a synchronous + // one, with no diagnostics at all. + // + // When the escaping-use restriction came into place for the release of + // 5.5, it was implemented as a warning and only applied to initializers, + // which stated that it would become an error in Swift 6. + // + // Once 5.6 was released, we also added restrictions in the deinits of + // actors, at least for accessing members other than stored properties. 
+ // + // Later on, for 5.7 we introduced flow-isolation as part of SE-327 for + // both inits and deinits. This meant that stored property accesses now + // are only sometimes going to be problematic. This change also brought + // official changes in isolation for the inits and deinits to handle the + // the non-stored-property members. Since those isolation changes are + // currently in place, the purpose of the code below is to override the + // isolation checking, so that the now-mismatched isolation on member + // access is still permitted, but with a warning stating that it will + // be rejected in Swift 6. + // + // In the checking below, we let stored-property accesses go ignored, + // so that flow-isolation can warn about them only if needed. This helps + // prevent needless warnings on property accesses that will actually be OK + // with flow-isolation in the future. if (auto oldFn = isActorInitOrDeInitContext(refCxt)) { - if (isa(oldFn) && member->isInstanceMember()) { - auto &diags = refCxt->getASTContext().Diags; - - // if the context in which we consider the access matches between - // old and new, and its a stored property, then skip the warning - // because it will still be allowed in Swift 6. - if (!(refCxt == oldFn && isStoredProperty(member))) { - unsigned cxtKind = 0; // deinit - - // try to get a better name for this context. - if (isa(refCxt)) { - cxtKind = 1; - } else if (isa(refCxt)) { - cxtKind = 2; - } + auto oldFnMut = const_cast(oldFn); - diags.diagnose(memberLoc, diag::actor_isolated_from_decl, - member->getDescriptiveKind(), - member->getName(), - cxtKind).warnUntilSwiftVersion(6); - } + // If function did not have the escaping-use restriction, then it gets + // no special permissions here. + if (!wasLegacyEscapingUseRestriction(oldFnMut)) + return false; + + // At this point, the special permission will be granted. 
But, we + // need to warn now about this permission being taken away in Swift 6 + // for specific kinds of non-stored-property member accesses: + + // If the context in which we consider the access matches between the + // old (escaping-use restriction) and new (flow-isolation) contexts, + // and it is a stored property, then permit it here without any warning. + // Later, flow-isolation pass will check and emit a warning if needed. + if (refCxt == oldFn && isStoredProperty(member)) return true; - } + + + // Otherwise, it's definitely going to be illegal, so warn and permit. + auto &diags = refCxt->getASTContext().Diags; + auto useKind = static_cast( + kindOfUsage(member, exprCxt).getValueOr(VarRefUseEnv::Read)); + + diags.diagnose( + memberLoc, diag::actor_isolated_non_self_reference, + member->getDescriptiveKind(), + member->getName(), + useKind, + baseActor.kind - 1, + baseActor.globalActor) + .warnUntilSwiftVersion(6); + + noteIsolatedActorMember(member, exprCxt); + return true; } return false; @@ -2743,10 +2841,11 @@ namespace { /// /// \returns true iff the member access is permitted in Sema because it will /// be verified later by flow-isolation. - static bool checkedByFlowIsolation(DeclContext const *refCxt, + bool checkedByFlowIsolation(DeclContext const *refCxt, ReferencedActor &baseActor, ValueDecl const *member, - SourceLoc memberLoc) { + SourceLoc memberLoc, + Expr *exprCxt) { // base of member reference must be `self` if (!baseActor.isActorSelf()) @@ -2774,7 +2873,8 @@ namespace { break; } - if (memberAccessWasAllowedInSwift5(refCxt, member, memberLoc)) + if (memberAccessHasSpecialPermissionInSwift5(refCxt, baseActor, member, + memberLoc, exprCxt)) return true; // then permit it now. if (!usesFlowSensitiveIsolation(fnDecl)) @@ -2888,7 +2988,7 @@ namespace { // access an isolated member on `self`. If that case applies, then we // can skip checking. 
if (checkedByFlowIsolation(getDeclContext(), isolatedActor, - member, memberLoc)) + member, memberLoc, context)) return false; // An escaping partial application of something that is part of diff --git a/test/Concurrency/actor_isolation.swift b/test/Concurrency/actor_isolation.swift index 3f6d837e4e41a..f5dd8da331d43 100644 --- a/test/Concurrency/actor_isolation.swift +++ b/test/Concurrency/actor_isolation.swift @@ -743,7 +743,7 @@ actor LocalFunctionIsolatedActor { func b2() -> Bool { @Sendable func c() -> Bool { - return true && a() // expected-error{{actor-isolated instance method 'a()' can not be referenced from a non-isolated context}} + return true && a() // expected-error{{actor-isolated instance method 'a()' can not be referenced from a non-isolated autoclosure}} } return c() } @@ -811,7 +811,9 @@ extension SomeClassInActor.ID { // ---------------------------------------------------------------------- @available(SwiftStdlib 5.1, *) actor SomeActorWithInits { - var mutableState: Int = 17 // expected-note 2 {{mutation of this property is only permitted within the actor}} + // expected-note@+2 2 {{property declared here}} + // expected-note@+1 3 {{mutation of this property is only permitted within the actor}} + var mutableState: Int = 17 var otherMutableState: Int let nonSendable: SomeClass @@ -825,26 +827,26 @@ actor SomeActorWithInits { self.mutableState = 42 self.otherMutableState = 17 - self.isolated() // expected-error{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context}} + self.isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context; this is an error in Swift 6}} self.nonisolated() defer { - isolated() // expected-error{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context}} - mutableState += 1 // okay + isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced 
from a non-isolated context; this is an error in Swift 6}} + mutableState += 1 // okay through typechecking, since flow-isolation will verify it. nonisolated() } func local() { - isolated() // expected-error{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context}} - mutableState += 1 // expected-error{{actor-isolated property 'mutableState' can not be mutated from a non-isolated context}} + isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context; this is an error in Swift 6}} + mutableState += 1 // expected-warning{{actor-isolated property 'mutableState' can not be mutated from a non-isolated context; this is an error in Swift 6}} nonisolated() } local() let _ = { defer { - isolated() // expected-error{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context}} - mutableState += 1 // expected-error{{actor-isolated property 'mutableState' can not be mutated from a non-isolated context}} + isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context; this is an error in Swift 6}} + mutableState += 1 // expected-warning{{actor-isolated property 'mutableState' can not be mutated from a non-isolated context; this is an error in Swift 6}} nonisolated() } nonisolated() @@ -861,12 +863,14 @@ actor SomeActorWithInits { convenience init(i3: Bool) { self.init(i1: i3) + _ = mutableState // expected-error{{actor-isolated property 'mutableState' can not be referenced from a non-isolated context}} self.isolated() // expected-error{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context}} self.nonisolated() } convenience init(i4: Bool) async { self.init(i1: i4) + _ = mutableState self.isolated() self.nonisolated() } @@ -876,7 +880,7 @@ actor SomeActorWithInits { self.otherMutableState = 17 self.nonSendable = x - self.isolated() // 
expected-error{{actor-isolated instance method 'isolated()' can not be referenced from the main actor}} + self.isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from the main actor; this is an error in Swift 6}} self.nonisolated() } @@ -886,33 +890,47 @@ actor SomeActorWithInits { await self.isolated() self.nonisolated() + + _ = mutableState // will be caught by flow-isolation } @MainActor convenience init(i7: Bool) { self.init(i1: i7) + _ = mutableState // expected-error{{actor-isolated property 'mutableState' can not be referenced from the main actor}} self.isolated() // expected-error{{actor-isolated instance method 'isolated()' can not be referenced from the main actor}} self.nonisolated() } @MainActor convenience init(i8: Bool) async { self.init(i1: i8) + _ = await mutableState await self.isolated() self.nonisolated() } + nonisolated init(i9: Bool) async { + self.mutableState = 0 + self.otherMutableState = 1 + + await self.isolated() + self.nonisolated() + + _ = mutableState // will be caught by flow-isolation + } + deinit { let _ = self.nonSendable // OK only through typechecking, not SIL. 
defer { - isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated deinit; this is an error in Swift 6}} + isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context; this is an error in Swift 6}} mutableState += 1 // okay nonisolated() } let _ = { defer { - isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated closure; this is an error in Swift 6}} - mutableState += 1 // expected-warning{{actor-isolated property 'mutableState' can not be referenced from a non-isolated closure; this is an error in Swift 6}} + isolated() // expected-warning{{actor-isolated instance method 'isolated()' can not be referenced from a non-isolated context; this is an error in Swift 6}} + mutableState += 1 // expected-warning{{actor-isolated property 'mutableState' can not be mutated from a non-isolated context; this is an error in Swift 6}} nonisolated() } nonisolated() @@ -920,7 +938,7 @@ actor SomeActorWithInits { } - func isolated() { } // expected-note 7 {{calls to instance method 'isolated()' from outside of its actor context are implicitly asynchronous}} + func isolated() { } // expected-note 9 {{calls to instance method 'isolated()' from outside of its actor context are implicitly asynchronous}} nonisolated func nonisolated() {} } @@ -1217,11 +1235,11 @@ actor Counter { } init() { - _ = self.next() // expected-error {{actor-isolated instance method 'next()' can not be referenced from a non-isolated context}} - defer { _ = self.next() } // expected-error {{actor-isolated instance method 'next()' can not be referenced from a non-isolated context}} + _ = self.next() // expected-warning {{actor-isolated instance method 'next()' can not be referenced from a non-isolated context; this is an error in Swift 6}} + defer { _ = self.next() } // expected-warning {{actor-isolated instance method 'next()' 
can not be referenced from a non-isolated context; this is an error in Swift 6}} - _ = computedProp // expected-error {{actor-isolated property 'computedProp' can not be referenced from a non-isolated context}} - computedProp = 1 // expected-error {{actor-isolated property 'computedProp' can not be mutated from a non-isolated context}} + _ = computedProp // expected-warning {{actor-isolated property 'computedProp' can not be referenced from a non-isolated context; this is an error in Swift 6}} + computedProp = 1 // expected-warning {{actor-isolated property 'computedProp' can not be mutated from a non-isolated context; this is an error in Swift 6}} } @@ -1375,7 +1393,7 @@ final class MainActorInit: Sendable { actor DunkTracker { private var lebron: Int? - private var curry: Int? + private var curry: Int? // expected-note {{property declared here}} deinit { // expected-warning@+1 {{actor-isolated property 'curry' can not be referenced from a non-isolated autoclosure; this is an error in Swift 6}} diff --git a/test/Concurrency/flow_isolation.swift b/test/Concurrency/flow_isolation.swift index b25531d66ce5d..f1da881c454ec 100644 --- a/test/Concurrency/flow_isolation.swift +++ b/test/Concurrency/flow_isolation.swift @@ -584,7 +584,7 @@ actor EscapeArtist { actor Ahmad { nonisolated func f() {} var prop: Int = 0 - var computedProp: Int { 10 } + var computedProp: Int { 10 } // expected-note {{property declared here}} init(v1: Void) { Task.detached { self.f() } // expected-note {{after making a copy of 'self', only non-isolated properties of 'self' can be accessed from this init}} @@ -598,8 +598,20 @@ actor Ahmad { prop += 1 // expected-warning {{cannot access property 'prop' here in non-isolated initializer; this is an error in Swift 6}} } + nonisolated init(v3: Void) async { + prop = 10 + f() // expected-note {{after calling instance method 'f()', only non-isolated properties of 'self' can be accessed from this init}} + prop += 1 // expected-warning {{cannot access 
property 'prop' here in non-isolated initializer; this is an error in Swift 6}} + } + + @MainActor init(v4: Void) async { + prop = 10 + f() // expected-note {{after calling instance method 'f()', only non-isolated properties of 'self' can be accessed from this init}} + prop += 1 // expected-warning {{cannot access property 'prop' here in non-isolated initializer; this is an error in Swift 6}} + } + deinit { - // expected-warning@+2 {{actor-isolated property 'computedProp' can not be referenced from a non-isolated deinit; this is an error in Swift 6}} + // expected-warning@+2 {{actor-isolated property 'computedProp' can not be referenced from a non-isolated context; this is an error in Swift 6}} // expected-note@+1 {{after accessing property 'computedProp', only non-isolated properties of 'self' can be accessed from a deinit}} let x = computedProp @@ -641,12 +653,12 @@ actor Rain { actor DeinitExceptionForSwift5 { var x: Int = 0 - func cleanup() { + func cleanup() { // expected-note {{calls to instance method 'cleanup()' from outside of its actor context are implicitly asynchronous}} x = 0 } deinit { - // expected-warning@+2 {{actor-isolated instance method 'cleanup()' can not be referenced from a non-isolated deinit; this is an error in Swift 6}} + // expected-warning@+2 {{actor-isolated instance method 'cleanup()' can not be referenced from a non-isolated context; this is an error in Swift 6}} // expected-note@+1 {{after calling instance method 'cleanup()', only non-isolated properties of 'self' can be accessed from a deinit}} cleanup() From aa51bdf17a9f6528eb203f03d952f06accb899b9 Mon Sep 17 00:00:00 2001 From: Evan Wilde Date: Fri, 7 Jan 2022 10:26:17 -0800 Subject: [PATCH 60/88] Add `noasync` availability kind to available attr This patch adds the `noasync` availability kind to `@available`. The spelling is `@available(*, noasync)`. 
--- include/swift/AST/Attr.h | 10 ++++ include/swift/AST/DiagnosticsParse.def | 5 +- include/swift/AST/DiagnosticsSema.def | 4 +- include/swift/AST/PrintOptions.h | 3 ++ include/swift/Basic/Features.def | 1 + lib/AST/ASTPrinter.cpp | 12 +++++ lib/AST/Attr.cpp | 55 ++++++++++++++++++- lib/AST/Decl.cpp | 5 +- lib/Parse/ParseDecl.cpp | 66 +++++++++++++++++------ lib/Sema/TypeCheckAvailability.cpp | 28 ++++++++-- lib/Serialization/Deserialization.cpp | 5 +- lib/Serialization/ModuleFormat.h | 3 +- lib/Serialization/Serialization.cpp | 1 + lib/SymbolGraphGen/AvailabilityMixin.cpp | 1 + test/ModuleInterface/features.swift | 9 ++++ test/attr/attr_availability.swift | 4 +- test/attr/attr_availability_noasync.swift | 41 ++++++++++++++ 17 files changed, 222 insertions(+), 31 deletions(-) create mode 100644 test/attr/attr_availability_noasync.swift diff --git a/include/swift/AST/Attr.h b/include/swift/AST/Attr.h index bd72feaac7c46..de292071ddff0 100644 --- a/include/swift/AST/Attr.h +++ b/include/swift/AST/Attr.h @@ -614,6 +614,8 @@ enum class PlatformAgnosticAvailabilityKind { PackageDescriptionVersionSpecific, /// The declaration is unavailable for other reasons. Unavailable, + /// The declaration is unavailable from asynchronous contexts + NoAsync, }; /// Defines the @available attribute. @@ -702,6 +704,9 @@ class AvailableAttr : public DeclAttribute { /// Whether this is an unconditionally deprecated entity. bool isUnconditionallyDeprecated() const; + /// Whether this is a noasync attribute. + bool isNoAsync() const; + /// Returns the platform-agnostic availability. PlatformAgnosticAvailabilityKind getPlatformAgnosticAvailability() const { return PlatformAgnostic; @@ -2261,6 +2266,11 @@ class DeclAttributes { /// a declaration will be deprecated in the future, or null otherwise. 
const AvailableAttr *getSoftDeprecated(const ASTContext &ctx) const; + /// Returns the first @available attribute that indicates + /// a declaration is unavailable from asynchronous contexts, or null + /// otherwise. + const AvailableAttr *getNoAsync(const ASTContext &ctx) const; + SWIFT_DEBUG_DUMPER(dump(const Decl *D = nullptr)); void print(ASTPrinter &Printer, const PrintOptions &Options, const Decl *D = nullptr) const; diff --git a/include/swift/AST/DiagnosticsParse.def b/include/swift/AST/DiagnosticsParse.def index 834854bbd1a97..661e21f6c5ab9 100644 --- a/include/swift/AST/DiagnosticsParse.def +++ b/include/swift/AST/DiagnosticsParse.def @@ -1497,9 +1497,8 @@ ERROR(attr_unsupported_on_target, none, // availability ERROR(attr_availability_platform,none, "expected platform name or '*' for '%0' attribute", (StringRef)) -ERROR(attr_availability_unavailable_deprecated,none, - "'%0' attribute cannot be both unconditionally 'unavailable' and " - "'deprecated'", (StringRef)) +ERROR(attr_availability_multiple_kinds ,none, + "'%0' attribute cannot be both '%1' and '%2'", (StringRef, StringRef, StringRef)) WARNING(attr_availability_invalid_duplicate,none, "'%0' argument has already been specified", (StringRef)) diff --git a/include/swift/AST/DiagnosticsSema.def b/include/swift/AST/DiagnosticsSema.def index 8943815552462..d369d84b048fe 100644 --- a/include/swift/AST/DiagnosticsSema.def +++ b/include/swift/AST/DiagnosticsSema.def @@ -4848,8 +4848,8 @@ ERROR(async_named_decl_must_be_available_from_async,none, "asynchronous %0 %1 must be available from asynchronous contexts", (DescriptiveDeclKind, DeclName)) ERROR(async_unavailable_decl,none, - "%0 %1 is unavailable from asynchronous contexts%select{|; %3}2", - (DescriptiveDeclKind, DeclBaseName, bool, StringRef)) + "%0 %1 is unavailable from asynchronous contexts%select{|; %2}2", + (DescriptiveDeclKind, DeclBaseName, StringRef)) //------------------------------------------------------------------------------ // MARK: 
String Processing diff --git a/include/swift/AST/PrintOptions.h b/include/swift/AST/PrintOptions.h index 3dd1c50889c7b..9419aef5312f9 100644 --- a/include/swift/AST/PrintOptions.h +++ b/include/swift/AST/PrintOptions.h @@ -301,6 +301,9 @@ struct PrintOptions { /// Whether to print generic requirements in a where clause. bool PrintGenericRequirements = true; + /// Suppress emitting @available(*, noasync) + bool SuppressNoAsyncAvailabilityAttr = false; + /// How to print opaque return types. enum class OpaqueReturnTypePrintingMode { /// 'some P1 & P2'. diff --git a/include/swift/Basic/Features.def b/include/swift/Basic/Features.def index 77870c7343e2c..ab5560952d97e 100644 --- a/include/swift/Basic/Features.def +++ b/include/swift/Basic/Features.def @@ -76,6 +76,7 @@ LANGUAGE_FEATURE(BuiltinAssumeAlignment, 0, "Builtin.assumeAlignment", true) SUPPRESSIBLE_LANGUAGE_FEATURE(UnsafeInheritExecutor, 0, "@_unsafeInheritExecutor", true) SUPPRESSIBLE_LANGUAGE_FEATURE(PrimaryAssociatedTypes, 0, "Primary associated types", true) SUPPRESSIBLE_LANGUAGE_FEATURE(UnavailableFromAsync, 0, "@_unavailableFromAsync", true) +SUPPRESSIBLE_LANGUAGE_FEATURE(NoAsyncAvailability, 340, "@available(*, noasync)", true) #undef SUPPRESSIBLE_LANGUAGE_FEATURE #undef LANGUAGE_FEATURE diff --git a/lib/AST/ASTPrinter.cpp b/lib/AST/ASTPrinter.cpp index 43108fc5ad6ca..000af787b4872 100644 --- a/lib/AST/ASTPrinter.cpp +++ b/lib/AST/ASTPrinter.cpp @@ -3015,6 +3015,18 @@ suppressingFeatureUnavailableFromAsync(PrintOptions &options, options.ExcludeAttrList.resize(originalExcludeAttrCount); } +static bool usesFeatureNoAsyncAvailability(Decl *decl) { + return decl->getAttrs().getNoAsync(decl->getASTContext()) != nullptr; +} + +static void +suppressingFeatureNoAsyncAvailability(PrintOptions &options, + llvm::function_ref action) { + llvm::SaveAndRestore orignalOptions(options); + options.SuppressNoAsyncAvailabilityAttr = true; + action(); +} + /// Suppress the printing of a particular feature. 
static void suppressingFeature(PrintOptions &options, Feature feature, llvm::function_ref action) { diff --git a/lib/AST/Attr.cpp b/lib/AST/Attr.cpp index 8df6cc0215bad..12060e4eb9429 100644 --- a/lib/AST/Attr.cpp +++ b/lib/AST/Attr.cpp @@ -190,7 +190,7 @@ DeclAttributes::findMostSpecificActivePlatform(const ASTContext &ctx) const{ continue; // We have an attribute that is active for the platform, but - // is it more specific than our curent best? + // is it more specific than our current best? if (!bestAttr || inheritsAvailabilityFromPlatform(avAttr->Platform, bestAttr->Platform)) { bestAttr = avAttr; @@ -356,6 +356,48 @@ DeclAttributes::getSoftDeprecated(const ASTContext &ctx) const { return conditional; } +const AvailableAttr *DeclAttributes::getNoAsync(const ASTContext &ctx) const { + const AvailableAttr *bestAttr = nullptr; + for (const DeclAttribute *attr : *this) { + if (const AvailableAttr *avAttr = dyn_cast(attr)) { + if (avAttr->isInvalid()) + continue; + + if (avAttr->getPlatformAgnosticAvailability() == + PlatformAgnosticAvailabilityKind::NoAsync) { + // An API may only be unavailable on specific platforms. + // If it doesn't have a platform associated with it, then it's + // unavailable for all platforms, so we should include it. If it does + // have a platform and we are not that platform, then it doesn't apply + // to us. + const bool isGoodForPlatform = + (avAttr->hasPlatform() && avAttr->isActivePlatform(ctx)) || + !avAttr->hasPlatform(); + + if (!isGoodForPlatform) + continue; + + if (!bestAttr) { + // If there is no best attr selected + // and the attr either has an active platform, or doesn't have one at + // all, select it. 
+ bestAttr = avAttr; + } else if (bestAttr && avAttr->hasPlatform() && + bestAttr->hasPlatform() && + inheritsAvailabilityFromPlatform(avAttr->Platform, + bestAttr->Platform)) { + // if they both have a viable platform, use the better one + bestAttr = avAttr; + } else if (avAttr->hasPlatform() && !bestAttr->hasPlatform()) { + // Use the one more specific + bestAttr = avAttr; + } + } + } + } + return bestAttr; +} + void DeclAttributes::dump(const Decl *D) const { StreamPrinter P(llvm::errs()); PrintOptions PO = PrintOptions::printDeclarations(); @@ -394,6 +436,7 @@ static bool isShortAvailable(const DeclAttribute *DA) { case PlatformAgnosticAvailabilityKind::Deprecated: case PlatformAgnosticAvailabilityKind::Unavailable: case PlatformAgnosticAvailabilityKind::UnavailableInSwift: + case PlatformAgnosticAvailabilityKind::NoAsync: return false; case PlatformAgnosticAvailabilityKind::None: case PlatformAgnosticAvailabilityKind::SwiftVersionSpecific: @@ -771,6 +814,8 @@ static void printAvailableAttr(const AvailableAttr *Attr, ASTPrinter &Printer, Printer << ", unavailable"; else if (Attr->isUnconditionallyDeprecated()) Printer << ", deprecated"; + else if (Attr->isNoAsync()) + Printer << ", noasync"; if (Attr->Introduced) Printer << ", introduced: " << Attr->Introduced.getValue().getAsString(); @@ -974,6 +1019,8 @@ bool DeclAttribute::printImpl(ASTPrinter &Printer, const PrintOptions &Options, case DAK_Available: { auto Attr = cast(this); + if (Options.SuppressNoAsyncAvailabilityAttr && Attr->isNoAsync()) + return false; if (!Options.PrintSPIs && Attr->IsSPI) { assert(Attr->hasPlatform()); assert(Attr->Introduced.hasValue()); @@ -1705,6 +1752,7 @@ bool AvailableAttr::isUnconditionallyUnavailable() const { case PlatformAgnosticAvailabilityKind::Deprecated: case PlatformAgnosticAvailabilityKind::SwiftVersionSpecific: case PlatformAgnosticAvailabilityKind::PackageDescriptionVersionSpecific: + case PlatformAgnosticAvailabilityKind::NoAsync: return false; case 
PlatformAgnosticAvailabilityKind::Unavailable: @@ -1722,6 +1770,7 @@ bool AvailableAttr::isUnconditionallyDeprecated() const { case PlatformAgnosticAvailabilityKind::UnavailableInSwift: case PlatformAgnosticAvailabilityKind::SwiftVersionSpecific: case PlatformAgnosticAvailabilityKind::PackageDescriptionVersionSpecific: + case PlatformAgnosticAvailabilityKind::NoAsync: return false; case PlatformAgnosticAvailabilityKind::Deprecated: @@ -1731,6 +1780,10 @@ bool AvailableAttr::isUnconditionallyDeprecated() const { llvm_unreachable("Unhandled PlatformAgnosticAvailabilityKind in switch."); } +bool AvailableAttr::isNoAsync() const { + return PlatformAgnostic == PlatformAgnosticAvailabilityKind::NoAsync; +} + llvm::VersionTuple AvailableAttr::getActiveVersion(const ASTContext &ctx) const { if (isLanguageVersionSpecific()) { return ctx.LangOpts.EffectiveLanguageVersion; diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp index 2b11a2c2abc8c..973cb04234d2a 100644 --- a/lib/AST/Decl.cpp +++ b/lib/AST/Decl.cpp @@ -7533,9 +7533,10 @@ AbstractFunctionDecl *AbstractFunctionDecl::getAsyncAlternative() const { // rename parameter, falling back to the first with a rename. 
Note that // `getAttrs` is in reverse source order, so the last attribute is the // first in source - if (!attr->Rename.empty() && (attr->Platform == PlatformKind::none || - !avAttr)) + if (!attr->Rename.empty() && + (attr->Platform == PlatformKind::none || !avAttr) && !attr->isNoAsync()) { avAttr = attr; + } } auto *renamedDecl = evaluateOrDefault( diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp index 3cebd21e9a156..2bf7d0b1a1c99 100644 --- a/lib/Parse/ParseDecl.cpp +++ b/lib/Parse/ParseDecl.cpp @@ -327,23 +327,48 @@ ParserResult Parser::parseExtendedAvailabilitySpecList( ++ParamIndex; enum { - IsMessage, IsRenamed, - IsIntroduced, IsDeprecated, IsObsoleted, + IsMessage, + IsRenamed, + IsIntroduced, + IsDeprecated, + IsObsoleted, IsUnavailable, + IsNoAsync, IsInvalid } ArgumentKind = IsInvalid; - + if (Tok.is(tok::identifier)) { - ArgumentKind = - llvm::StringSwitch(ArgumentKindStr) - .Case("message", IsMessage) - .Case("renamed", IsRenamed) - .Case("introduced", IsIntroduced) - .Case("deprecated", IsDeprecated) - .Case("obsoleted", IsObsoleted) - .Case("unavailable", IsUnavailable) - .Default(IsInvalid); - } + ArgumentKind = llvm::StringSwitch(ArgumentKindStr) + .Case("message", IsMessage) + .Case("renamed", IsRenamed) + .Case("introduced", IsIntroduced) + .Case("deprecated", IsDeprecated) + .Case("obsoleted", IsObsoleted) + .Case("unavailable", IsUnavailable) + .Case("noasync", IsNoAsync) + .Default(IsInvalid); + } + + auto platformAgnosticKindToStr = [](PlatformAgnosticAvailabilityKind kind) { + switch (kind) { + case PlatformAgnosticAvailabilityKind::None: + return "none"; + case PlatformAgnosticAvailabilityKind::Deprecated: + return "deprecated"; + case PlatformAgnosticAvailabilityKind::Unavailable: + return "unavailable"; + case PlatformAgnosticAvailabilityKind::NoAsync: + return "noasync"; + + // These are possible platform agnostic availability kinds. 
+ // I'm not sure what their spellings are at the moment, so I'm + // crashing instead of handling them. + case PlatformAgnosticAvailabilityKind::UnavailableInSwift: + case PlatformAgnosticAvailabilityKind::SwiftVersionSpecific: + case PlatformAgnosticAvailabilityKind::PackageDescriptionVersionSpecific: + llvm_unreachable("Unknown availability kind for parser"); + } + }; if (ArgumentKind == IsInvalid) { diagnose(ArgumentLoc, diag::attr_availability_expected_option, AttrName) @@ -419,8 +444,8 @@ ParserResult Parser::parseExtendedAvailabilitySpecList( case IsDeprecated: if (!findAttrValueDelimiter()) { if (PlatformAgnostic != PlatformAgnosticAvailabilityKind::None) { - diagnose(Tok, diag::attr_availability_unavailable_deprecated, - AttrName); + diagnose(Tok, diag::attr_availability_multiple_kinds, AttrName, + "deprecated", platformAgnosticKindToStr(PlatformAgnostic)); } PlatformAgnostic = PlatformAgnosticAvailabilityKind::Deprecated; @@ -467,12 +492,21 @@ ParserResult Parser::parseExtendedAvailabilitySpecList( case IsUnavailable: if (PlatformAgnostic != PlatformAgnosticAvailabilityKind::None) { - diagnose(Tok, diag::attr_availability_unavailable_deprecated, AttrName); + diagnose(Tok, diag::attr_availability_multiple_kinds, AttrName, + "unavailable", platformAgnosticKindToStr(PlatformAgnostic)); } PlatformAgnostic = PlatformAgnosticAvailabilityKind::Unavailable; break; + case IsNoAsync: + if (PlatformAgnostic != PlatformAgnosticAvailabilityKind::None) { + diagnose(Tok, diag::attr_availability_multiple_kinds, AttrName, + "noasync", platformAgnosticKindToStr(PlatformAgnostic)); + } + PlatformAgnostic = PlatformAgnosticAvailabilityKind::NoAsync; + break; + case IsInvalid: llvm_unreachable("handled above"); } diff --git a/lib/Sema/TypeCheckAvailability.cpp b/lib/Sema/TypeCheckAvailability.cpp index 7123ad0554e5e..ef1388d6b7824 100644 --- a/lib/Sema/TypeCheckAvailability.cpp +++ b/lib/Sema/TypeCheckAvailability.cpp @@ -2608,6 +2608,9 @@ bool 
swift::diagnoseExplicitUnavailability(SourceLoc loc, case PlatformAgnosticAvailabilityKind::Deprecated: llvm_unreachable("shouldn't see deprecations in explicit unavailability"); + case PlatformAgnosticAvailabilityKind::NoAsync: + llvm_unreachable("shouldn't see noasync in explicit unavailability"); + case PlatformAgnosticAvailabilityKind::None: case PlatformAgnosticAvailabilityKind::Unavailable: if (attr->Platform != PlatformKind::none) { @@ -2772,6 +2775,9 @@ bool swift::diagnoseExplicitUnavailability( case PlatformAgnosticAvailabilityKind::Deprecated: llvm_unreachable("shouldn't see deprecations in explicit unavailability"); + case PlatformAgnosticAvailabilityKind::NoAsync: + llvm_unreachable("shouldn't see noasync with explicit unavailability"); + case PlatformAgnosticAvailabilityKind::None: case PlatformAgnosticAvailabilityKind::Unavailable: if (Attr->Platform != PlatformKind::none) { @@ -3296,16 +3302,32 @@ diagnoseDeclUnavailableFromAsync(const ValueDecl *D, SourceRange R, // If we are in a synchronous context, don't check it if (!Where.getDeclContext()->isAsyncContext()) return false; - if (!D->getAttrs().hasAttribute()) - return false; ASTContext &ctx = Where.getDeclContext()->getASTContext(); + if (const AvailableAttr *attr = D->getAttrs().getNoAsync(ctx)) { + SourceLoc diagLoc = call ? call->getLoc() : R.Start; + auto diag = ctx.Diags.diagnose(diagLoc, diag::async_unavailable_decl, + D->getDescriptiveKind(), D->getBaseName(), + attr->Message); + + if (!attr->Rename.empty()) { + fixItAvailableAttrRename(diag, R, D, attr, call); + } + return true; + } + + const bool hasUnavailableAttr = + D->getAttrs().hasAttribute(); + + if (!hasUnavailableAttr) + return false; + // @available(noasync) spelling const UnavailableFromAsyncAttr *attr = D->getAttrs().getAttribute(); SourceLoc diagLoc = call ? 
call->getLoc() : R.Start; ctx.Diags .diagnose(diagLoc, diag::async_unavailable_decl, D->getDescriptiveKind(), - D->getBaseName(), attr->hasMessage(), attr->Message) + D->getBaseName(), attr->Message) .warnUntilSwiftVersion(6); D->diagnose(diag::decl_declared_here, D->getName()); return true; diff --git a/lib/Serialization/Deserialization.cpp b/lib/Serialization/Deserialization.cpp index a7f3c8d9345d9..7aae3ac33f3dd 100644 --- a/lib/Serialization/Deserialization.cpp +++ b/lib/Serialization/Deserialization.cpp @@ -4361,6 +4361,7 @@ DeclDeserializer::readAvailable_DECL_ATTR(SmallVectorImpl &scratch, bool isImplicit; bool isUnavailable; bool isDeprecated; + bool isNoAsync; bool isPackageDescriptionVersionSpecific; bool isSPI; DEF_VER_TUPLE_PIECES(Introduced); @@ -4371,7 +4372,7 @@ DeclDeserializer::readAvailable_DECL_ATTR(SmallVectorImpl &scratch, // Decode the record, pulling the version tuple information. serialization::decls_block::AvailableDeclAttrLayout::readRecord( - scratch, isImplicit, isUnavailable, isDeprecated, + scratch, isImplicit, isUnavailable, isDeprecated, isNoAsync, isPackageDescriptionVersionSpecific, isSPI, LIST_VER_TUPLE_PIECES(Introduced), LIST_VER_TUPLE_PIECES(Deprecated), LIST_VER_TUPLE_PIECES(Obsoleted), platform, renameDeclID, messageSize, renameSize); @@ -4394,6 +4395,8 @@ DeclDeserializer::readAvailable_DECL_ATTR(SmallVectorImpl &scratch, platformAgnostic = PlatformAgnosticAvailabilityKind::Unavailable; else if (isDeprecated) platformAgnostic = PlatformAgnosticAvailabilityKind::Deprecated; + else if (isNoAsync) + platformAgnostic = PlatformAgnosticAvailabilityKind::NoAsync; else if (((PlatformKind)platform) == PlatformKind::none && (!Introduced.empty() || !Deprecated.empty() || !Obsoleted.empty())) platformAgnostic = diff --git a/lib/Serialization/ModuleFormat.h b/lib/Serialization/ModuleFormat.h index a8fe71727065e..09d4c70d3a0af 100644 --- a/lib/Serialization/ModuleFormat.h +++ b/lib/Serialization/ModuleFormat.h @@ -56,7 +56,7 @@ const 
uint16_t SWIFTMODULE_VERSION_MAJOR = 0; /// describe what change you made. The content of this comment isn't important; /// it just ensures a conflict if two people change the module format. /// Don't worry about adhering to the 80-column limit for this line. -const uint16_t SWIFTMODULE_VERSION_MINOR = 678; // remove shared_external linkage +const uint16_t SWIFTMODULE_VERSION_MINOR = 679; // NoAsync /// A standard hash seed used for all string hashes in a serialized module. /// @@ -1916,6 +1916,7 @@ namespace decls_block { BCFixed<1>, // implicit flag BCFixed<1>, // is unconditionally unavailable? BCFixed<1>, // is unconditionally deprecated? + BCFixed<1>, // is unavailable from async? BCFixed<1>, // is this PackageDescription version-specific kind? BCFixed<1>, // is SPI? BC_AVAIL_TUPLE, // Introduced diff --git a/lib/Serialization/Serialization.cpp b/lib/Serialization/Serialization.cpp index c51f42a27d062..e257cbd69b36a 100644 --- a/lib/Serialization/Serialization.cpp +++ b/lib/Serialization/Serialization.cpp @@ -2580,6 +2580,7 @@ class Serializer::DeclSerializer : public DeclVisitor { theAttr->isImplicit(), theAttr->isUnconditionallyUnavailable(), theAttr->isUnconditionallyDeprecated(), + theAttr->isNoAsync(), theAttr->isPackageDescriptionVersionSpecific(), theAttr->IsSPI, LIST_VER_TUPLE_PIECES(Introduced), diff --git a/lib/SymbolGraphGen/AvailabilityMixin.cpp b/lib/SymbolGraphGen/AvailabilityMixin.cpp index ba0789b29fedc..120a9049dc535 100644 --- a/lib/SymbolGraphGen/AvailabilityMixin.cpp +++ b/lib/SymbolGraphGen/AvailabilityMixin.cpp @@ -31,6 +31,7 @@ StringRef getDomain(const AvailableAttr &AvAttr) { case PlatformAgnosticAvailabilityKind::Deprecated: case PlatformAgnosticAvailabilityKind::Unavailable: case PlatformAgnosticAvailabilityKind::None: + case PlatformAgnosticAvailabilityKind::NoAsync: break; } diff --git a/test/ModuleInterface/features.swift b/test/ModuleInterface/features.swift index 7a56083bd48b3..206365aba309d 100644 --- 
a/test/ModuleInterface/features.swift +++ b/test/ModuleInterface/features.swift @@ -185,4 +185,13 @@ public func multipleSuppressible(value: T) async {} @_unavailableFromAsync(message: "Test") public func unavailableFromAsyncFunc() { } +// CHECK: #if compiler(>=5.3) && $NoAsyncAvailability +// CHECK-NEXT: @available(*, noasync, message: "Test") +// CHECK-NEXT: public func noAsyncFunc() +// CHECK-NEXT: #else +// CHECK-NEXT: public func noAsyncFunc() +// CHECK-NEXT: #endif +@available(*, noasync, message: "Test") +public func noAsyncFunc() { } + // CHECK-NOT: extension FeatureTest.MyActor : Swift.Sendable diff --git a/test/attr/attr_availability.swift b/test/attr/attr_availability.swift index 8579fa8104f00..e2588b77b2644 100644 --- a/test/attr/attr_availability.swift +++ b/test/attr/attr_availability.swift @@ -149,7 +149,7 @@ let _: Int @available(*, renamed: "a(:b:)") // expected-error{{'renamed' argument of 'available' attribute must be an operator, identifier, or full function name, optionally prefixed by a type name}} let _: Int -@available(*, deprecated, unavailable, message: "message") // expected-error{{'available' attribute cannot be both unconditionally 'unavailable' and 'deprecated'}} +@available(*, deprecated, unavailable, message: "message") // expected-error{{'available' attribute cannot be both 'unavailable' and 'deprecated'}} struct BadUnconditionalAvailability { }; @available(*, unavailable, message="oh no you don't") // expected-error {{'=' has been replaced with ':' in attribute arguments}} {{35-36=: }} @@ -1230,4 +1230,4 @@ struct UnavailableSubscripts { _ = self[to: 3] // expected-warning {{'subscript(to:)' is deprecated: renamed to 'subscriptTo(_:)'}} // expected-note {{use 'subscriptTo(_:)' instead}} {{13-14=.subscriptTo(}} {{19-20=)}} {{14-18=}} _ = x[to: 3] // expected-warning {{'subscript(to:)' is deprecated: renamed to 'subscriptTo(_:)'}} // expected-note {{use 'subscriptTo(_:)' instead}} {{10-11=.subscriptTo(}} {{16-17=)}} {{11-15=}} } -} \ 
No newline at end of file +} diff --git a/test/attr/attr_availability_noasync.swift b/test/attr/attr_availability_noasync.swift new file mode 100644 index 0000000000000..b3d6d8dfe3747 --- /dev/null +++ b/test/attr/attr_availability_noasync.swift @@ -0,0 +1,41 @@ +// RUN: %target-typecheck-verify-swift + +// REQUIRES: concurrency + + +@available(*, noasync) +func basicNoAsync() { } + +@available(*, noasync, message: "a message from the author") +func messageNoAsync() { } + +@available(*, noasync, renamed: "asyncReplacement()") +func renamedNoAsync(_ completion: @escaping (Int) -> Void) -> Void { } + +@available(macOS 11, *) +func asyncReplacement() async -> Int { } + +@available(*, noasync, renamed: "IOActor.readString()") +func readStringFromIO() -> String {} + +@available(macOS 11, *) +actor IOActor { + func readString() -> String { + return readStringFromIO() + } +} + +@available(macOS 11, *) +func asyncFunc() async { + // expected-error@+1{{global function 'basicNoAsync' is unavailable from asynchronous contexts}} + basicNoAsync() + + // expected-error@+1{{global function 'messageNoAsync' is unavailable from asynchronous contexts; a message from the author}} + messageNoAsync() + + // expected-error@+1{{global function 'renamedNoAsync' is unavailable from asynchronous contexts}}{{5-19=asyncReplacement}} + renamedNoAsync() { _ in } + + // expected-error@+1{{global function 'readStringFromIO' is unavailable from asynchronous contexts}}{{13-29=IOActor.readString}} + let _ = readStringFromIO() +} From 249c2a11d02ff35295935c899d4a61e3ec5edfea Mon Sep 17 00:00:00 2001 From: Evan Wilde Date: Fri, 7 Jan 2022 12:24:14 -0800 Subject: [PATCH 61/88] Typecheck noasync attr This patch adds validation to ensure that the noasync attribute is only applied to useful declarations. Specifically, the `noasync` attribute cannot be applied to `deinit` declarations, asynchronous functions, or asynchronous properties. 
--- lib/Sema/TypeCheckAttr.cpp | 29 +++++++++++++++++++++++ test/attr/attr_availability_noasync.swift | 21 ++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/lib/Sema/TypeCheckAttr.cpp b/lib/Sema/TypeCheckAttr.cpp index 215d6be9f4100..a265c6999563c 100644 --- a/lib/Sema/TypeCheckAttr.cpp +++ b/lib/Sema/TypeCheckAttr.cpp @@ -1640,6 +1640,35 @@ void AttributeChecker::visitAvailableAttr(AvailableAttr *attr) { } } + if (attr->isNoAsync()) { + const DeclContext * dctx = dyn_cast(D); + bool isAsyncDeclContext = dctx && dctx->isAsyncContext(); + + if (const AbstractStorageDecl *decl = dyn_cast(D)) { + const AccessorDecl * accessor = decl->getEffectfulGetAccessor(); + isAsyncDeclContext |= accessor && accessor->isAsyncContext(); + } + + if (isAsyncDeclContext) { + if (const ValueDecl *vd = dyn_cast(D)) { + D->getASTContext().Diags.diagnose( + D->getLoc(), diag::async_named_decl_must_be_available_from_async, + D->getDescriptiveKind(), vd->getName()); + } else { + D->getASTContext().Diags.diagnose( + D->getLoc(), diag::async_decl_must_be_available_from_async, + D->getDescriptiveKind()); + } + } + + // deinit's may not be unavailable from async contexts + if (isa(D)) { + D->getASTContext().Diags.diagnose( + D->getLoc(), diag::invalid_decl_attribute, attr); + } + + } + if (!attr->hasPlatform() || !attr->isActivePlatform(Ctx) || !attr->Introduced.hasValue()) { return; diff --git a/test/attr/attr_availability_noasync.swift b/test/attr/attr_availability_noasync.swift index b3d6d8dfe3747..2fa150113b9f5 100644 --- a/test/attr/attr_availability_noasync.swift +++ b/test/attr/attr_availability_noasync.swift @@ -39,3 +39,24 @@ func asyncFunc() async { // expected-error@+1{{global function 'readStringFromIO' is unavailable from asynchronous contexts}}{{13-29=IOActor.readString}} let _ = readStringFromIO() } + +// expected-error@+2{{asynchronous global function 'unavailableAsyncFunc()' must be available from asynchronous contexts}} +@available(*, noasync) +func 
unavailableAsyncFunc() async { +} + +protocol BadSyncable { + // expected-error@+2{{asynchronous property 'isSyncd' must be available from asynchronous contexts}} + @available(*, noasync) + var isSyncd: Bool { get async } + + // expected-error@+2{{asynchronous instance method 'sync' must be available from asynchronous contexts}} + @available(*, noasync) + func sync(_ str: String) async +} + +class TestClass { + // expected-error@+2{{'@available' attribute cannot be applied to this declaration}} + @available(*, noasync) + deinit { } +} From 900383708b2b3adc6bf79fc81c295634b88f7a15 Mon Sep 17 00:00:00 2001 From: Evan Wilde Date: Thu, 13 Jan 2022 12:21:12 -0800 Subject: [PATCH 62/88] Fix remaining test issues Needed to add availability information for macOS, iOS, and watchOS to avoid emitting unintended errors on the `async` keyword. --- test/attr/attr_availability_noasync.swift | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/attr/attr_availability_noasync.swift b/test/attr/attr_availability_noasync.swift index 2fa150113b9f5..d2c7dc7444f4f 100644 --- a/test/attr/attr_availability_noasync.swift +++ b/test/attr/attr_availability_noasync.swift @@ -12,20 +12,20 @@ func messageNoAsync() { } @available(*, noasync, renamed: "asyncReplacement()") func renamedNoAsync(_ completion: @escaping (Int) -> Void) -> Void { } -@available(macOS 11, *) +@available(macOS 11, iOS 13, watchOS 6, *) func asyncReplacement() async -> Int { } @available(*, noasync, renamed: "IOActor.readString()") func readStringFromIO() -> String {} -@available(macOS 11, *) +@available(macOS 11, iOS 13, watchOS 6, *) actor IOActor { func readString() -> String { return readStringFromIO() } } -@available(macOS 11, *) +@available(macOS 11, iOS 13, watchOS 6, *) func asyncFunc() async { // expected-error@+1{{global function 'basicNoAsync' is unavailable from asynchronous contexts}} basicNoAsync() @@ -40,11 +40,13 @@ func asyncFunc() async { let _ = readStringFromIO() } -// 
expected-error@+2{{asynchronous global function 'unavailableAsyncFunc()' must be available from asynchronous contexts}} +// expected-error@+3{{asynchronous global function 'unavailableAsyncFunc()' must be available from asynchronous contexts}} +@available(macOS 11, iOS 13, watchOS 6, *) @available(*, noasync) func unavailableAsyncFunc() async { } +@available(macOS 11, iOS 13, watchOS 6, *) protocol BadSyncable { // expected-error@+2{{asynchronous property 'isSyncd' must be available from asynchronous contexts}} @available(*, noasync) From 110839e8014b94a2def5e468d7ac371c5f8f7863 Mon Sep 17 00:00:00 2001 From: Evan Wilde Date: Wed, 9 Mar 2022 13:03:46 -0800 Subject: [PATCH 63/88] Add cross-module test Verify importing noasync across modules works correctly. --- test/Concurrency/Inputs/UnavailableFunction.swift | 3 +++ test/Concurrency/unavailable_from_async.swift | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/test/Concurrency/Inputs/UnavailableFunction.swift b/test/Concurrency/Inputs/UnavailableFunction.swift index 0bbc8ee378f4b..db46c404df8f0 100644 --- a/test/Concurrency/Inputs/UnavailableFunction.swift +++ b/test/Concurrency/Inputs/UnavailableFunction.swift @@ -1,2 +1,5 @@ @_unavailableFromAsync public func unavailableFunction() { } + +@available(*, noasync) +public func noasyncFunction() { } diff --git a/test/Concurrency/unavailable_from_async.swift b/test/Concurrency/unavailable_from_async.swift index 325eec48b946b..73f23dba1fc96 100644 --- a/test/Concurrency/unavailable_from_async.swift +++ b/test/Concurrency/unavailable_from_async.swift @@ -64,11 +64,13 @@ func makeAsyncClosuresSynchronously(bop: inout Bop) -> (() async -> Void) { bop.foo() // expected-warning@:9{{'foo' is unavailable from asynchronous contexts}} bop.muppet() // expected-warning@:9{{'muppet' is unavailable from asynchronous contexts}} unavailableFunction() // expected-warning@:5{{'unavailableFunction' is unavailable from asynchronous contexts}} + noasyncFunction() // 
expected-error@:5{{'noasyncFunction' is unavailable from asynchronous contexts}} // Can use them from synchronous closures _ = { Bop() }() _ = { bop.foo() }() _ = { bop.muppet() }() + _ = { noasyncFunction() }() // Unavailable global function foo() // expected-warning{{'foo' is unavailable from asynchronous contexts}} @@ -87,6 +89,7 @@ func asyncFunc() async { // expected-error{{asynchronous global function 'asyncF bop.foo() // expected-warning@:7{{'foo' is unavailable from asynchronous contexts}} bop.muppet() // expected-warning@:7{{'muppet' is unavailable from asynchronous contexts}} unavailableFunction() // expected-warning@:3{{'unavailableFunction' is unavailable from asynchronous contexts}} + noasyncFunction() // expected-error@:3{{'noasyncFunction' is unavailable from asynchronous contexts}} // Unavailable global function foo() // expected-warning{{'foo' is unavailable from asynchronous contexts}} @@ -101,6 +104,7 @@ func asyncFunc() async { // expected-error{{asynchronous global function 'asyncF bop.foo() bop.muppet() unavailableFunction() + noasyncFunction() _ = { () async -> Void in // Check Unavailable things inside of a nested async closure @@ -109,6 +113,7 @@ func asyncFunc() async { // expected-error{{asynchronous global function 'asyncF bop.muppet() // expected-warning@:11{{'muppet' is unavailable from asynchronous contexts}} _ = Bop() // expected-warning@:11{{'init' is unavailable from asynchronous contexts; Use Bop(a: Int) instead}} unavailableFunction() // expected-warning@:7{{'unavailableFunction' is unavailable from asynchronous contexts}} + noasyncFunction() // expected-error@:7{{'noasyncFunction' is unavailable from asynchronous contexts}} } } @@ -118,6 +123,7 @@ func asyncFunc() async { // expected-error{{asynchronous global function 'asyncF bop.foo() // expected-warning@:9{{'foo' is unavailable from asynchronous contexts}} bop.muppet() // expected-warning@:9{{'muppet' is unavailable from asynchronous contexts}} unavailableFunction() // 
expected-warning@:5{{'unavailableFunction' is unavailable from asynchronous contexts}} + noasyncFunction() // expected-error@:5{{'noasyncFunction' is unavailable from asynchronous contexts}} _ = { foo() From aa22e79435cc03743c3dbc36f49e0a4f46ec95ec Mon Sep 17 00:00:00 2001 From: Evan Wilde Date: Tue, 15 Mar 2022 11:47:47 -0700 Subject: [PATCH 64/88] Add noasync attribute to changelog --- CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5ae2e3983d2ae..bb87a9a928f2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,17 @@ _**Note:** This is in reverse chronological order, so newer entries are added to ## Swift 5.7 +* [SE-0340][]: + + It is now possible to make declarations unavailable from use in asynchronous + contexts with the `@available(*, noasync)` attribute. + + This is to protect the consumers of an API against undefined behavior that can + occur when the API uses thread-local storage, or encourages using thread-local + storage, across suspension points, or protect developers against holding locks + across suspension points which may lead to undefined behavior, priority + inversions, or deadlocks. + * [SE-0343][]: Top-level scripts support asynchronous calls. @@ -9083,6 +9094,7 @@ Swift 1.0 [SE-0341]: [SE-0336]: [SE-0343]: +[SE-0340]: [SR-75]: [SR-106]: From 0bb6a7b9e83948bfe8012b461a23f4cbf1a7fa65 Mon Sep 17 00:00:00 2001 From: Allan Shortlidge Date: Tue, 22 Mar 2022 14:36:17 -0700 Subject: [PATCH 65/88] SIL: Avoid serializing the bodies of the original copies of @_backDeploy functions to prevent the optimizer from inlining those bodies into their back deployment thunks. This ensures that the copy of the function in the library dylib is reliably called when it is available, both with and without optimizations enabled. Also update the attr_backDeploy_evolution test case to use precondition() instead of assert() since asserts are compiled out with optimizations enabled. 
Resolves rdar://90525337 --- include/swift/SIL/SILDeclRef.h | 2 ++ include/swift/SIL/SILLinkage.h | 2 +- lib/SIL/IR/SILDeclRef.cpp | 24 ++++++++++++++ .../back_deploy_attribute_accessor.swift | 2 +- test/SILGen/back_deploy_attribute_func.swift | 4 +-- .../back_deploy_attribute_generic_func.swift | 2 +- .../back_deploy_attribute_struct_method.swift | 2 +- .../back_deploy_attribute_throwing_func.swift | 2 +- test/attr/attr_backDeploy_evolution.swift | 33 +++++++++---------- 9 files changed, 48 insertions(+), 25 deletions(-) diff --git a/include/swift/SIL/SILDeclRef.h b/include/swift/SIL/SILDeclRef.h index 952f14665a747..930037fefed90 100644 --- a/include/swift/SIL/SILDeclRef.h +++ b/include/swift/SIL/SILDeclRef.h @@ -372,6 +372,8 @@ struct SILDeclRef { bool isNoinline() const; /// True if the function has __always inline attribute. bool isAlwaysInline() const; + /// True if the function has the @_backDeploy attribute. + bool isBackDeployed() const; /// Return the expected linkage of this declaration. SILLinkage getLinkage(ForDefinition_t forDefinition) const; diff --git a/include/swift/SIL/SILLinkage.h b/include/swift/SIL/SILLinkage.h index ba286be451c0a..0f8d9a33195d8 100644 --- a/include/swift/SIL/SILLinkage.h +++ b/include/swift/SIL/SILLinkage.h @@ -135,7 +135,7 @@ enum IsSerialized_t : unsigned char { /// /// This flag is only valid for Public, PublicNonABI, PublicExternal, /// HiddenExternal and Shared functions. - /// Functions with external linkage (PublicExternl, HiddenExternal) will not + /// Functions with external linkage (PublicExternal, HiddenExternal) will not /// be serialized, because they are available in a different module (from which /// they were de-serialized). 
/// diff --git a/lib/SIL/IR/SILDeclRef.cpp b/lib/SIL/IR/SILDeclRef.cpp index ea019127664a2..c99ac31751aac 100644 --- a/lib/SIL/IR/SILDeclRef.cpp +++ b/lib/SIL/IR/SILDeclRef.cpp @@ -689,6 +689,19 @@ IsSerialized_t SILDeclRef::isSerialized() const { if (isClangImported()) return IsSerialized; + // Handle back deployed functions. The original back deployed function + // should not be serialized, but the thunk and fallback should be since they + // need to be emitted into the client. + if (isBackDeployed()) { + switch (backDeploymentKind) { + case BackDeploymentKind::None: + return IsNotSerialized; + case BackDeploymentKind::Fallback: + case BackDeploymentKind::Thunk: + return IsSerialized; + } + } + // Otherwise, ask the AST if we're inside an @inlinable context. if (dc->getResilienceExpansion() == ResilienceExpansion::Minimal) return IsSerialized; @@ -736,6 +749,17 @@ bool SILDeclRef::isAlwaysInline() const { return false; } +bool SILDeclRef::isBackDeployed() const { + if (!hasDecl()) + return false; + + auto *decl = getDecl(); + if (auto afd = dyn_cast(decl)) + return afd->isBackDeployed(); + + return false; +} + bool SILDeclRef::isAnyThunk() const { return isForeignToNativeThunk() || isNativeToForeignThunk() || isDistributedThunk() || isBackDeploymentThunk(); diff --git a/test/SILGen/back_deploy_attribute_accessor.swift b/test/SILGen/back_deploy_attribute_accessor.swift index d2aa336bbc757..77195f24349b1 100644 --- a/test/SILGen/back_deploy_attribute_accessor.swift +++ b/test/SILGen/back_deploy_attribute_accessor.swift @@ -36,7 +36,7 @@ public struct TopLevelStruct { // CHECK: return [[RETURN_BB_ARG]] : $TopLevelStruct // -- Original definition of TopLevelStruct.property.getter - // CHECK-LABEL: sil [serialized] [available 10.51] [ossa] @$s11back_deploy14TopLevelStructV8propertyACvg : $@convention(method) (TopLevelStruct) -> TopLevelStruct + // CHECK-LABEL: sil [available 10.51] [ossa] @$s11back_deploy14TopLevelStructV8propertyACvg : $@convention(method) 
(TopLevelStruct) -> TopLevelStruct @available(macOS 10.51, *) @_backDeploy(before: macOS 10.52) public var property: TopLevelStruct { self } diff --git a/test/SILGen/back_deploy_attribute_func.swift b/test/SILGen/back_deploy_attribute_func.swift index 76e245b6c475d..77d1b0403a0d1 100644 --- a/test/SILGen/back_deploy_attribute_func.swift +++ b/test/SILGen/back_deploy_attribute_func.swift @@ -36,7 +36,7 @@ // CHECK: return [[RESULT]] : $() // -- Original definition of trivialFunc() -// CHECK-LABEL: sil [serialized] [available 10.51] [ossa] @$s11back_deploy11trivialFuncyyF : $@convention(thin) () -> () +// CHECK-LABEL: sil [available 10.51] [ossa] @$s11back_deploy11trivialFuncyyF : $@convention(thin) () -> () @available(macOS 10.51, *) @_backDeploy(before: macOS 10.52) public func trivialFunc() {} @@ -71,7 +71,7 @@ public func trivialFunc() {} // CHECK: return [[RETURN_BB_ARG]] : $Bool // -- Original definition of isNumber(_:) -// CHECK-LABEL: sil [serialized] [available 10.51] [ossa] @$s11back_deploy8isNumberySbSiF : $@convention(thin) (Int) -> Bool +// CHECK-LABEL: sil [available 10.51] [ossa] @$s11back_deploy8isNumberySbSiF : $@convention(thin) (Int) -> Bool @available(macOS 10.51, *) @_backDeploy(before: macOS 10.52) public func isNumber(_ x: Int) -> Bool { diff --git a/test/SILGen/back_deploy_attribute_generic_func.swift b/test/SILGen/back_deploy_attribute_generic_func.swift index b4c0338b10b2b..2dd2a721aff31 100644 --- a/test/SILGen/back_deploy_attribute_generic_func.swift +++ b/test/SILGen/back_deploy_attribute_generic_func.swift @@ -37,7 +37,7 @@ // CHECK: return [[RESULT]] : $() // -- Original definition of genericFunc() -// CHECK-LABEL: sil [serialized] [available 10.51] [ossa] @$s11back_deploy11genericFuncyxxlF : $@convention(thin) (@in_guaranteed T) -> @out T +// CHECK-LABEL: sil [available 10.51] [ossa] @$s11back_deploy11genericFuncyxxlF : $@convention(thin) (@in_guaranteed T) -> @out T @available(macOS 10.51, *) @_backDeploy(before: macOS 10.52) public 
func genericFunc(_ t: T) -> T { diff --git a/test/SILGen/back_deploy_attribute_struct_method.swift b/test/SILGen/back_deploy_attribute_struct_method.swift index 9fc630a75904e..ca7bb8e6815fa 100644 --- a/test/SILGen/back_deploy_attribute_struct_method.swift +++ b/test/SILGen/back_deploy_attribute_struct_method.swift @@ -38,7 +38,7 @@ public struct TopLevelStruct { // CHECK: return [[RESULT]] : $() // -- Original definition of TopLevelStruct.trivialMethod() - // CHECK-LABEL: sil [serialized] [available 10.51] [ossa] @$s11back_deploy14TopLevelStructV13trivialMethodyyF : $@convention(method) (TopLevelStruct) -> () + // CHECK-LABEL: sil [available 10.51] [ossa] @$s11back_deploy14TopLevelStructV13trivialMethodyyF : $@convention(method) (TopLevelStruct) -> () @available(macOS 10.51, *) @_backDeploy(before: macOS 10.52) public func trivialMethod() {} diff --git a/test/SILGen/back_deploy_attribute_throwing_func.swift b/test/SILGen/back_deploy_attribute_throwing_func.swift index fd0753bc86bfc..be5baded56f7f 100644 --- a/test/SILGen/back_deploy_attribute_throwing_func.swift +++ b/test/SILGen/back_deploy_attribute_throwing_func.swift @@ -49,7 +49,7 @@ // CHECK: throw [[RETHROW_BB_ARG]] : $Error // -- Original definition of throwingFunc() -// CHECK-LABEL: sil [serialized] [available 10.51] [ossa] @$s11back_deploy12throwingFuncyyKF : $@convention(thin) () -> @error Error +// CHECK-LABEL: sil [available 10.51] [ossa] @$s11back_deploy12throwingFuncyyKF : $@convention(thin) () -> @error Error @available(macOS 10.51, *) @_backDeploy(before: macOS 10.52) public func throwingFunc() throws {} diff --git a/test/attr/attr_backDeploy_evolution.swift b/test/attr/attr_backDeploy_evolution.swift index d5bbc43b21943..da6d04e4ed608 100644 --- a/test/attr/attr_backDeploy_evolution.swift +++ b/test/attr/attr_backDeploy_evolution.swift @@ -32,9 +32,6 @@ // REQUIRES: executable_test // REQUIRES: VENDOR=apple -// rdar://90525337 -// UNSUPPORTED: swift_test_mode_optimize - // ---- (0) Prepare SDK // 
RUN: %empty-directory(%t) // RUN: %empty-directory(%t/SDK_ABI) @@ -119,24 +116,24 @@ testPrint(handle: #dsohandle, "check") testPrint(handle: libraryHandle(), "check") if isV2OrLater() { - assert(!v2APIsAreStripped()) + precondition(!v2APIsAreStripped()) } // CHECK-ABI: library: trivial // CHECK-BD: client: trivial trivial() -assert(try! pleaseThrow(false)) +precondition(try! pleaseThrow(false)) do { _ = try pleaseThrow(true) fatalError("Should have thrown") } catch { - assert(error as? BadError == BadError.bad) + precondition(error as? BadError == BadError.bad) } do { let zero = MutableInt.zero - assert(zero.value == 0) + precondition(zero.value == 0) var int = MutableInt(5) @@ -144,9 +141,9 @@ do { // CHECK-BD: client: 5 int.print() - assert(int.increment(by: 2) == 7) - assert(genericIncrement(&int, by: 3) == 10) - assert(int.decrement(by: 1) == 9) + precondition(int.increment(by: 2) == 7) + precondition(genericIncrement(&int, by: 3) == 10) + precondition(int.decrement(by: 1) == 9) var incrementable: any Incrementable = int.toIncrementable() @@ -156,13 +153,13 @@ do { let int2 = MutableInt(0x7BB7914B) for (i, expectedByte) in [0x4B, 0x91, 0xB7, 0x7B].enumerated() { - assert(int2[byteAt: i] == expectedByte) + precondition(int2[byteAt: i] == expectedByte) } } do { let zero = ReferenceInt.zero - assert(zero.value == 0) + precondition(zero.value == 0) var int = ReferenceInt(42) @@ -172,13 +169,13 @@ do { do { let copy = int.copy() - assert(int !== copy) - assert(copy.value == 42) + precondition(int !== copy) + precondition(copy.value == 42) } - assert(int.increment(by: 2) == 44) - assert(genericIncrement(&int, by: 3) == 47) - assert(int.decrement(by: 46) == 1) + precondition(int.increment(by: 2) == 44) + precondition(genericIncrement(&int, by: 3) == 47) + precondition(int.decrement(by: 46) == 1) var incrementable: any Incrementable = int.toIncrementable() @@ -188,6 +185,6 @@ do { let int2 = MutableInt(0x08AFAB76) for (i, expectedByte) in [0x76, 0xAB, 0xAF, 
0x08].enumerated() { - assert(int2[byteAt: i] == expectedByte) + precondition(int2[byteAt: i] == expectedByte) } } From b6c3ad3ddc66538965966191f55448dda5af007d Mon Sep 17 00:00:00 2001 From: Robert Widmann Date: Tue, 22 Mar 2022 15:35:58 -0700 Subject: [PATCH 66/88] Augment Test for Confusing ExpressibleByNilLiteral Case Add a test for an extremely confusing behavior of switches for ExpressibleByNilLiteral-conforming types. From the looks of the expression tree, one would hope that `case nil` would match such types. Instead, the subject value is up-converted to an optional and compared to `nil` directly with ~=. --- test/stmt/switch_nil.swift | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/test/stmt/switch_nil.swift b/test/stmt/switch_nil.swift index 019d00d655e56..0efb5cbff1349 100644 --- a/test/stmt/switch_nil.swift +++ b/test/stmt/switch_nil.swift @@ -12,3 +12,19 @@ func test() { break } } + +struct Nilable: ExpressibleByNilLiteral { + init(nilLiteral: ()) {} +} + +func testNil() { + // N.B. A deeply confusing case as no conversion is performed on the `nil` + // literal. Instead, the match subject is converted to `Nilable?` and compared + // using ~=. 
+ switch Nilable(nilLiteral: ()) { + case nil: // expected-warning {{type 'Nilable' is not optional, value can never be nil}} + break + default: + break + } +} From 1b910f539bee5479ace202cf8f3a7f2013ca10b1 Mon Sep 17 00:00:00 2001 From: John McCall Date: Tue, 22 Mar 2022 18:12:01 -0400 Subject: [PATCH 67/88] [NFC] Document the fields in an ABI generic context header --- include/swift/ABI/Metadata.h | 50 +++++++++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/include/swift/ABI/Metadata.h b/include/swift/ABI/Metadata.h index 711e2eddac80b..946f511cd9795 100644 --- a/include/swift/ABI/Metadata.h +++ b/include/swift/ABI/Metadata.h @@ -2970,12 +2970,60 @@ TargetContextDescriptor::getModuleContext() const { template struct TargetGenericContextDescriptorHeader { - uint16_t NumParams, NumRequirements, NumKeyArguments, NumExtraArguments; + /// The number of (source-written) generic parameters, and thus + /// the number of GenericParamDescriptors associated with this + /// context. The parameter descriptors appear in the order in + /// which they were given in the source. + /// + /// A GenericParamDescriptor corresponds to a type metadata pointer + /// in the arguments layout when isKeyArgument() is true. + /// isKeyArgument() will be false if the parameter has been unified + /// with a different parameter or an associated type. + uint16_t NumParams; + + /// The number of GenericRequirementDescriptors in this generic + /// signature. + /// + /// A GenericRequirementDescriptor of kind Protocol corresponds + /// to a witness table pointer in the arguments layout when + /// isKeyArgument() is true. isKeyArgument() will be false if + /// the protocol is an Objective-C protocol. (Unlike generic + /// parameters, redundant conformance requirements can simply be + /// eliminated, and so that case is not impossible.) + uint16_t NumRequirements; + + /// The size of the "key" area of the argument layout, in words. 
+ /// Key arguments include generic parameters + /// and conformance requirements which are part of the identity of the context. + /// + /// The key area of the argument layout consists of a sequence + /// of type metadata pointers (in the same order as the parameter + /// descriptors, for those parameters which satisfy hasKeyArgument()) + /// followed by a sequence of witness table pointers (in the same + /// order as the requirements, for those requirements which satisfy + /// hasKeyArgument()). + uint16_t NumKeyArguments; + + /// In principle, the size of the "extra" area of the argument + /// layout, in words. The idea was that extra arguments would + /// include generic parameters and conformances that are not part + /// of the identity of the context; however, it's unclear why we + /// would ever want such a thing. As a result, this section is + /// unused, and this field is always zero. It can be repurposed + /// as long as it remains zero in code which must be compatible + /// with existing Swift runtimes. + uint16_t NumExtraArguments; uint32_t getNumArguments() const { return NumKeyArguments + NumExtraArguments; } + /// Return the total size of the argument layout, in words. + /// The alignment of the argument layout is the word alignment. 
+ uint32_t getArgumentLayoutSizeInWords() const { + return getNumArguments(); + } + bool hasArguments() const { return getNumArguments() > 0; } From c05e47dd600bf2903f634a695d1131bbedc59200 Mon Sep 17 00:00:00 2001 From: David Smith Date: Tue, 22 Mar 2022 15:48:24 -0700 Subject: [PATCH 68/88] Only use SIMD when stdlib vector types are available --- stdlib/cmake/modules/SwiftSource.cmake | 4 ++++ stdlib/public/core/StringUTF16View.swift | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/stdlib/cmake/modules/SwiftSource.cmake b/stdlib/cmake/modules/SwiftSource.cmake index 183cb10c95c45..128ddde624ca7 100644 --- a/stdlib/cmake/modules/SwiftSource.cmake +++ b/stdlib/cmake/modules/SwiftSource.cmake @@ -300,6 +300,10 @@ function(_add_target_variant_swift_compile_flags if(SWIFT_STDLIB_ENABLE_UNICODE_DATA) list(APPEND result "-D" "SWIFT_STDLIB_ENABLE_UNICODE_DATA") endif() + + if(SWIFT_STDLIB_ENABLE_VECTOR_TYPES) + list(APPEND result "-D" "SWIFT_STDLIB_ENABLE_VECTOR_TYPES") + endif() if(SWIFT_STDLIB_HAS_COMMANDLINE) list(APPEND result "-D" "SWIFT_STDLIB_HAS_COMMANDLINE") diff --git a/stdlib/public/core/StringUTF16View.swift b/stdlib/public/core/StringUTF16View.swift index 326ee05464173..a5cec6b6b9ff7 100644 --- a/stdlib/public/core/StringUTF16View.swift +++ b/stdlib/public/core/StringUTF16View.swift @@ -520,6 +520,7 @@ extension _StringGuts { extension String.UTF16View { +#if SWIFT_STDLIB_ENABLE_VECTOR_TYPES @inline(__always) internal func _utf16Length( readPtr: inout UnsafeRawPointer, @@ -551,6 +552,7 @@ extension String.UTF16View { return utf16Count } +#endif @inline(__always) internal func _utf16Distance(from start: Index, to end: Index) -> Int { @@ -576,6 +578,7 @@ extension String.UTF16View { readPtr += 1 } +#if SWIFT_STDLIB_ENABLE_VECTOR_TYPES // TODO: Currently, using SIMD sizes above SIMD8 is slower // Once that's fixed we should go up to SIMD64 here @@ -605,6 +608,7 @@ extension String.UTF16View { return 0 } } +#endif //trailing bytes while readPtr < endPtr { 
From 2fd4de411e7128c1326f97b574d9aabb2c5fb22a Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sat, 5 Mar 2022 17:45:02 -0800 Subject: [PATCH 69/88] [SIL-opaque] Removed [Unconditional]CheckedCastValue --- include/swift/SIL/DynamicCasts.h | 54 ------------ include/swift/SIL/SILBuilder.h | 22 ----- include/swift/SIL/SILCloner.h | 34 -------- include/swift/SIL/SILInstruction.h | 73 ---------------- include/swift/SIL/SILNode.h | 2 - include/swift/SIL/SILNodes.def | 8 +- .../swift/SILOptimizer/Utils/CastOptimizer.h | 8 -- include/swift/SILOptimizer/Utils/SCCVisitor.h | 1 - lib/IRGen/IRGenSIL.cpp | 13 --- lib/SIL/IR/OperandOwnership.cpp | 2 - lib/SIL/IR/SILArgument.cpp | 2 - lib/SIL/IR/SILFunction.cpp | 3 - lib/SIL/IR/SILInstruction.cpp | 2 - lib/SIL/IR/SILInstructions.cpp | 36 -------- lib/SIL/IR/SILPrinter.cpp | 13 --- lib/SIL/IR/ValueOwnership.cpp | 1 - lib/SIL/Parser/ParseSIL.cpp | 28 ------ lib/SIL/Utils/BasicBlockUtils.cpp | 10 --- lib/SIL/Utils/DynamicCasts.cpp | 5 ++ lib/SIL/Utils/InstructionUtils.cpp | 5 -- lib/SIL/Utils/MemAccessUtils.cpp | 3 - lib/SIL/Verifier/SILVerifier.cpp | 26 ------ lib/SILGen/SILGenBuilder.cpp | 25 +----- lib/SILGen/SILGenBuilder.h | 16 ---- lib/SILGen/SILGenDynamicCast.cpp | 45 +++------- lib/SILGen/SILGenLazyConformance.cpp | 7 -- lib/SILOptimizer/ARC/ARCSequenceOptUtils.cpp | 1 - .../Differentiation/VJPCloner.cpp | 14 --- .../Mandatory/AddressLowering.cpp | 16 +--- .../Mandatory/DiagnoseInfiniteRecursion.cpp | 3 +- .../Mandatory/Differentiation.cpp | 1 - .../Mandatory/PredictableMemOpt.cpp | 3 +- .../Transforms/DeadCodeElimination.cpp | 2 - lib/SILOptimizer/Transforms/SimplifyCFG.cpp | 48 ---------- .../UtilityPasses/SerializeSILPass.cpp | 2 - lib/SILOptimizer/Utils/CFGOptUtils.cpp | 17 ---- lib/SILOptimizer/Utils/CastOptimizer.cpp | 87 ------------------- lib/SILOptimizer/Utils/SILInliner.cpp | 2 - lib/Serialization/DeserializeSIL.cpp | 30 ------- lib/Serialization/SerializeSIL.cpp | 37 -------- 
test/SIL/Parser/opaque_values_parse.sil | 30 ------- .../Serialization/opaque_values_serialize.sil | 30 ------- .../opaque_use_verifier.sil | 20 ----- test/SILOptimizer/latecodemotion.sil | 16 ---- test/SILOptimizer/side-effect.sil | 9 -- 45 files changed, 24 insertions(+), 788 deletions(-) diff --git a/include/swift/SIL/DynamicCasts.h b/include/swift/SIL/DynamicCasts.h index 8b9e0a562f3b6..161011565abde 100644 --- a/include/swift/SIL/DynamicCasts.h +++ b/include/swift/SIL/DynamicCasts.h @@ -187,14 +187,10 @@ struct SILDynamicCastInst { // checked_cast_value_br yet. Should we ever support it, please // review this code. case SILDynamicCastKind::CheckedCastBranchInst: - case SILDynamicCastKind::CheckedCastValueBranchInst: - return CastConsumptionKind::CopyOnSuccess; case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: return CastConsumptionKind::TakeAlways; case SILDynamicCastKind::UnconditionalCheckedCastInst: return CastConsumptionKind::CopyOnSuccess; - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } @@ -203,10 +199,8 @@ struct SILDynamicCastInst { switch (getKind()) { case SILDynamicCastKind::CheckedCastAddrBranchInst: case SILDynamicCastKind::CheckedCastBranchInst: - case SILDynamicCastKind::CheckedCastValueBranchInst: case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: case SILDynamicCastKind::UnconditionalCheckedCastInst: - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: llvm_unreachable("unsupported"); } } @@ -217,13 +211,9 @@ struct SILDynamicCastInst { return cast(inst)->getSuccessBB(); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getSuccessBB(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - return cast(inst)->getSuccessBB(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: case SILDynamicCastKind::UnconditionalCheckedCastInst: return nullptr; - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: 
- llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } @@ -234,13 +224,9 @@ struct SILDynamicCastInst { llvm_unreachable("unsupported"); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getTrueBBCount(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - llvm_unreachable("unsupported"); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: case SILDynamicCastKind::UnconditionalCheckedCastInst: return None; - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } @@ -255,13 +241,9 @@ struct SILDynamicCastInst { return cast(inst)->getFailureBB(); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getFailureBB(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - return cast(inst)->getFailureBB(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: case SILDynamicCastKind::UnconditionalCheckedCastInst: return nullptr; - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } @@ -272,13 +254,9 @@ struct SILDynamicCastInst { llvm_unreachable("unsupported"); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getFalseBBCount(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - llvm_unreachable("unsupported"); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: case SILDynamicCastKind::UnconditionalCheckedCastInst: return None; - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } @@ -293,14 +271,10 @@ struct SILDynamicCastInst { return cast(inst)->getSrc(); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getOperand(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - return cast(inst)->getOperand(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: return cast(inst)->getSrc(); case 
SILDynamicCastKind::UnconditionalCheckedCastInst: return cast(inst)->getOperand(); - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } @@ -311,7 +285,6 @@ struct SILDynamicCastInst { case SILDynamicCastKind::CheckedCastAddrBranchInst: return cast(inst)->getDest(); case SILDynamicCastKind::CheckedCastBranchInst: - case SILDynamicCastKind::CheckedCastValueBranchInst: // TODO: Shouldn't this return getSuccessBlock()->getArgument(0)? return SILValue(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: @@ -321,8 +294,6 @@ struct SILDynamicCastInst { // // return cast(inst); return SILValue(); - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unimplemented"); } llvm_unreachable("covered switch"); } @@ -333,14 +304,10 @@ struct SILDynamicCastInst { return cast(inst)->getSourceFormalType(); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getSourceFormalType(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - return cast(inst)->getSourceFormalType(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: return cast(inst)->getSourceFormalType(); case SILDynamicCastKind::UnconditionalCheckedCastInst: return cast(inst)->getSourceFormalType(); - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - return cast(inst)->getSourceFormalType(); } llvm_unreachable("covered switch"); } @@ -351,14 +318,10 @@ struct SILDynamicCastInst { return cast(inst)->getSourceLoweredType(); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getSourceLoweredType(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - return cast(inst)->getSourceLoweredType(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: return cast(inst)->getSourceLoweredType(); case SILDynamicCastKind::UnconditionalCheckedCastInst: return cast(inst)->getSourceLoweredType(); - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: 
- return cast(inst)->getSourceLoweredType(); } llvm_unreachable("covered switch"); } @@ -369,14 +332,10 @@ struct SILDynamicCastInst { return cast(inst)->getTargetFormalType(); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getTargetFormalType(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - return cast(inst)->getTargetFormalType(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: return cast(inst)->getTargetFormalType(); case SILDynamicCastKind::UnconditionalCheckedCastInst: return cast(inst)->getTargetFormalType(); - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - return cast(inst)->getTargetFormalType(); } llvm_unreachable("covered switch"); } @@ -387,28 +346,21 @@ struct SILDynamicCastInst { return cast(inst)->getDest()->getType(); case SILDynamicCastKind::CheckedCastBranchInst: return cast(inst)->getTargetLoweredType(); - case SILDynamicCastKind::CheckedCastValueBranchInst: - return cast(inst)->getTargetLoweredType(); case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: return cast(inst)->getDest()->getType(); case SILDynamicCastKind::UnconditionalCheckedCastInst: return cast(inst)->getTargetLoweredType(); - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - return cast(inst)->getTargetLoweredType(); } llvm_unreachable("covered switch"); } bool isSourceTypeExact() const { switch (getKind()) { - case SILDynamicCastKind::CheckedCastValueBranchInst: case SILDynamicCastKind::CheckedCastBranchInst: case SILDynamicCastKind::CheckedCastAddrBranchInst: case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: case SILDynamicCastKind::UnconditionalCheckedCastInst: return isa(getSource()); - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } @@ -476,15 +428,9 @@ struct SILDynamicCastInst { auto f = classifyFeasibility(false /*allow wmo*/); return f == DynamicCastFeasibility::MaySucceed; } - case 
SILDynamicCastKind::CheckedCastValueBranchInst: { - auto f = classifyFeasibility(false /*allow wmo opts*/); - return f == DynamicCastFeasibility::MaySucceed; - } case SILDynamicCastKind::UnconditionalCheckedCastAddrInst: case SILDynamicCastKind::UnconditionalCheckedCastInst: return false; - case SILDynamicCastKind::UnconditionalCheckedCastValueInst: - llvm_unreachable("unsupported"); } llvm_unreachable("covered switch"); } diff --git a/include/swift/SIL/SILBuilder.h b/include/swift/SIL/SILBuilder.h index 5da1ae39cae7c..ae594dac9fc08 100644 --- a/include/swift/SIL/SILBuilder.h +++ b/include/swift/SIL/SILBuilder.h @@ -1314,16 +1314,6 @@ class SILBuilder { dest, targetFormalType, getFunction())); } - UnconditionalCheckedCastValueInst * - createUnconditionalCheckedCastValue(SILLocation Loc, - SILValue op, CanType srcFormalTy, - SILType destLoweredTy, - CanType destFormalTy) { - return insert(UnconditionalCheckedCastValueInst::create( - getSILDebugLocation(Loc), op, srcFormalTy, - destLoweredTy, destFormalTy, getFunction())); - } - RetainValueInst *createRetainValue(SILLocation Loc, SILValue operand, Atomicity atomicity) { assert(!hasOwnership()); @@ -2324,18 +2314,6 @@ class SILBuilder { ProfileCounter Target1Count = ProfileCounter(), ProfileCounter Target2Count = ProfileCounter()); - CheckedCastValueBranchInst * - createCheckedCastValueBranch(SILLocation Loc, - SILValue op, CanType srcFormalTy, - SILType destLoweredTy, - CanType destFormalTy, - SILBasicBlock *successBB, - SILBasicBlock *failureBB) { - return insertTerminator(CheckedCastValueBranchInst::create( - getSILDebugLocation(Loc), op, srcFormalTy, - destLoweredTy, destFormalTy, successBB, failureBB, getFunction())); - } - CheckedCastAddrBranchInst * createCheckedCastAddrBranch(SILLocation Loc, CastConsumptionKind consumption, SILValue src, CanType sourceFormalType, diff --git a/include/swift/SIL/SILCloner.h b/include/swift/SIL/SILCloner.h index 5f43f2de11d42..aa64fb4aa4f3a 100644 --- 
a/include/swift/SIL/SILCloner.h +++ b/include/swift/SIL/SILCloner.h @@ -1706,24 +1706,6 @@ SILCloner::visitUnconditionalCheckedCastAddrInst( OpLoc, SrcValue, SrcType, DestValue, TargetType)); } -template -void SILCloner::visitUnconditionalCheckedCastValueInst( - UnconditionalCheckedCastValueInst *Inst) { - SILLocation OpLoc = getOpLocation(Inst->getLoc()); - SILValue OpValue = getOpValue(Inst->getOperand()); - CanType SrcFormalType = getOpASTType(Inst->getSourceFormalType()); - SILType OpLoweredType = getOpType(Inst->getTargetLoweredType()); - CanType OpFormalType = getOpASTType(Inst->getTargetFormalType()); - getBuilder().setCurrentDebugScope(getOpScope(Inst->getDebugScope())); - recordClonedInstruction( - Inst, - getBuilder().createUnconditionalCheckedCastValue(OpLoc, - OpValue, - SrcFormalType, - OpLoweredType, - OpFormalType)); -} - template void SILCloner::visitRetainValueInst(RetainValueInst *Inst) { getBuilder().setCurrentDebugScope(getOpScope(Inst->getDebugScope())); @@ -2737,22 +2719,6 @@ SILCloner::visitCheckedCastBranchInst(CheckedCastBranchInst *Inst) { Inst->getForwardingOwnershipKind(), TrueCount, FalseCount)); } -template -void SILCloner::visitCheckedCastValueBranchInst( - CheckedCastValueBranchInst *Inst) { - SILBasicBlock *OpSuccBB = getOpBasicBlock(Inst->getSuccessBB()); - SILBasicBlock *OpFailBB = getOpBasicBlock(Inst->getFailureBB()); - getBuilder().setCurrentDebugScope(getOpScope(Inst->getDebugScope())); - recordClonedInstruction( - Inst, getBuilder().createCheckedCastValueBranch( - getOpLocation(Inst->getLoc()), - getOpValue(Inst->getOperand()), - getOpASTType(Inst->getSourceFormalType()), - getOpType(Inst->getTargetLoweredType()), - getOpASTType(Inst->getTargetFormalType()), - OpSuccBB, OpFailBB)); -} - template void SILCloner::visitCheckedCastAddrBranchInst( CheckedCastAddrBranchInst *Inst) { diff --git a/include/swift/SIL/SILInstruction.h b/include/swift/SIL/SILInstruction.h index 29dac91d93ff6..82c6a62053f5c 100644 --- 
a/include/swift/SIL/SILInstruction.h +++ b/include/swift/SIL/SILInstruction.h @@ -5664,39 +5664,6 @@ class UnconditionalCheckedCastInst final SILType getTargetLoweredType() const { return getType(); } }; -/// Perform an unconditional checked cast that aborts if the cast fails. -/// The result of the checked cast is left in the destination. -class UnconditionalCheckedCastValueInst final - : public UnaryInstructionWithTypeDependentOperandsBase< - SILInstructionKind::UnconditionalCheckedCastValueInst, - UnconditionalCheckedCastValueInst, ConversionInst> { - CanType SourceFormalTy; - CanType DestFormalTy; - friend SILBuilder; - - UnconditionalCheckedCastValueInst(SILDebugLocation DebugLoc, - SILValue Operand, CanType SourceFormalTy, - ArrayRef TypeDependentOperands, - SILType DestLoweredTy, CanType DestFormalTy) - : UnaryInstructionWithTypeDependentOperandsBase( - DebugLoc, Operand, TypeDependentOperands, - DestLoweredTy), - SourceFormalTy(SourceFormalTy), - DestFormalTy(DestFormalTy) {} - - static UnconditionalCheckedCastValueInst * - create(SILDebugLocation DebugLoc, - SILValue Operand, CanType SourceFormalTy, - SILType DestLoweredTy, CanType DestFormalTy, SILFunction &F); - -public: - SILType getSourceLoweredType() const { return getOperand()->getType(); } - CanType getSourceFormalType() const { return SourceFormalTy; } - - SILType getTargetLoweredType() const { return getType(); } - CanType getTargetFormalType() const { return DestFormalTy; } -}; - /// StructInst - Represents a constructed loadable struct. 
class StructInst final : public InstructionBaseWithTrailingOperands< SILInstructionKind::StructInst, StructInst, @@ -8132,7 +8099,6 @@ class TermInst : public NonValueInstruction { case TermKind::SwitchEnumAddrInst: case TermKind::DynamicMethodBranchInst: case TermKind::CheckedCastAddrBranchInst: - case TermKind::CheckedCastValueBranchInst: case TermKind::AwaitAsyncContinuationInst: return false; case TermKind::SwitchEnumInst: @@ -9159,45 +9125,6 @@ class CheckedCastBranchInst final CanType getTargetFormalType() const { return DestFormalTy; } }; -/// Perform a checked cast operation and branch on whether the cast succeeds. -/// The success branch destination block receives the cast result as a BB -/// argument. -class CheckedCastValueBranchInst final - : public UnaryInstructionWithTypeDependentOperandsBase< - SILInstructionKind::CheckedCastValueBranchInst, - CheckedCastValueBranchInst, CastBranchInstBase> { - friend SILBuilder; - - CanType SourceFormalTy; - SILType DestLoweredTy; - CanType DestFormalTy; - - CheckedCastValueBranchInst(SILDebugLocation DebugLoc, SILValue Operand, - CanType SourceFormalTy, - ArrayRef TypeDependentOperands, - SILType DestLoweredTy, CanType DestFormalTy, - SILBasicBlock *SuccessBB, SILBasicBlock *FailureBB) - : UnaryInstructionWithTypeDependentOperandsBase( - DebugLoc, Operand, TypeDependentOperands, SuccessBB, FailureBB, - ProfileCounter(), ProfileCounter()), - SourceFormalTy(SourceFormalTy), DestLoweredTy(DestLoweredTy), - DestFormalTy(DestFormalTy) {} - - static CheckedCastValueBranchInst * - create(SILDebugLocation DebugLoc, - SILValue Operand, CanType SourceFormalTy, - SILType DestLoweredTy, CanType DestFormalTy, - SILBasicBlock *SuccessBB, SILBasicBlock *FailureBB, - SILFunction &F); - -public: - SILType getSourceLoweredType() const { return getOperand()->getType(); } - CanType getSourceFormalType() const { return SourceFormalTy; } - - SILType getTargetLoweredType() const { return DestLoweredTy; } - CanType getTargetFormalType() 
const { return DestFormalTy; } -}; - /// Perform a checked cast operation and branch on whether the cast succeeds. /// The result of the checked cast is left in the destination address. class CheckedCastAddrBranchInst final diff --git a/include/swift/SIL/SILNode.h b/include/swift/SIL/SILNode.h index 86439cc099afa..d9a1ca77c609d 100644 --- a/include/swift/SIL/SILNode.h +++ b/include/swift/SIL/SILNode.h @@ -375,7 +375,6 @@ class alignas(8) SILNode : UIWTDOB_BITFIELD_EMPTY(UncheckedTrivialBitCastInst, ConversionInst); UIWTDOB_BITFIELD_EMPTY(UncheckedBitwiseCastInst, ConversionInst); UIWTDOB_BITFIELD_EMPTY(ThinToThickFunctionInst, ConversionInst); - UIWTDOB_BITFIELD_EMPTY(UnconditionalCheckedCastValueInst, ConversionInst); UIWTDOB_BITFIELD_EMPTY(InitExistentialAddrInst, SingleValueInstruction); UIWTDOB_BITFIELD_EMPTY(InitExistentialValueInst, SingleValueInstruction); UIWTDOB_BITFIELD_EMPTY(InitExistentialRefInst, SingleValueInstruction); @@ -383,7 +382,6 @@ class alignas(8) SILNode : SWIFT_INLINE_BITFIELD_EMPTY(TermInst, SILInstruction); UIWTDOB_BITFIELD_EMPTY(CheckedCastBranchInst, SingleValueInstruction); - UIWTDOB_BITFIELD_EMPTY(CheckedCastValueBranchInst, SingleValueInstruction); // Ensure that BranchInst bitfield does not overflow. IBWTO_BITFIELD_EMPTY(BranchInst, TermInst); diff --git a/include/swift/SIL/SILNodes.def b/include/swift/SIL/SILNodes.def index 8cf56e480a301..2c37b0bdfe471 100644 --- a/include/swift/SIL/SILNodes.def +++ b/include/swift/SIL/SILNodes.def @@ -425,10 +425,6 @@ ABSTRACT_VALUE_AND_INST(SingleValueInstruction, ValueBase, SILInstruction) ConversionInst, None, DoesNotRelease) BRIDGED_SINGLE_VALUE_INST(ObjCExistentialMetatypeToObjectInst, objc_existential_metatype_to_object, ConversionInst, None, DoesNotRelease) - // unconditional_checked_cast_value reads the source value and produces - // a new value with a potentially different representation. 
- DYNAMICCAST_SINGLE_VALUE_INST(UnconditionalCheckedCastValueInst, unconditional_checked_cast_value, - ConversionInst, MayRead, MayRelease) // unconditional_checked_cast_inst is only MayRead to prevent a subsequent // release of the cast's source from being hoisted above the cast: // retain X @@ -687,9 +683,7 @@ ABSTRACT_INST(TermInst, SILInstruction) TermInst, None, DoesNotRelease) DYNAMICCAST_TERMINATOR(CheckedCastAddrBranchInst, checked_cast_addr_br, TermInst, MayHaveSideEffects, MayRelease) - DYNAMICCAST_TERMINATOR(CheckedCastValueBranchInst, checked_cast_value_br, - TermInst, None, DoesNotRelease) - INST_RANGE(TermInst, UnreachableInst, CheckedCastValueBranchInst) + INST_RANGE(TermInst, UnreachableInst, CheckedCastAddrBranchInst) // Deallocation instructions. ABSTRACT_INST(DeallocationInst, SILInstruction) diff --git a/include/swift/SILOptimizer/Utils/CastOptimizer.h b/include/swift/SILOptimizer/Utils/CastOptimizer.h index b80938b972ac1..26b50e8125ed4 100644 --- a/include/swift/SILOptimizer/Utils/CastOptimizer.h +++ b/include/swift/SILOptimizer/Utils/CastOptimizer.h @@ -97,10 +97,6 @@ class CastOptimizer { /// Simplify checked_cast_br. It may change the control flow. SILInstruction *simplifyCheckedCastBranchInst(CheckedCastBranchInst *Inst); - /// Simplify checked_cast_value_br. It may change the control flow. - SILInstruction * - simplifyCheckedCastValueBranchInst(CheckedCastValueBranchInst *Inst); - /// Simplify checked_cast_addr_br. It may change the control flow. SILInstruction * simplifyCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *Inst); @@ -108,10 +104,6 @@ class CastOptimizer { /// Optimize checked_cast_br. This cannot change the control flow. SILInstruction *optimizeCheckedCastBranchInst(CheckedCastBranchInst *Inst); - /// Optimize checked_cast_value_br. This cannot change the control flow. - SILInstruction * - optimizeCheckedCastValueBranchInst(CheckedCastValueBranchInst *Inst); - /// Optimize checked_cast_addr_br. 
This cannot change the control flow. SILInstruction * optimizeCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *Inst); diff --git a/include/swift/SILOptimizer/Utils/SCCVisitor.h b/include/swift/SILOptimizer/Utils/SCCVisitor.h index 2bc658529e355..e2b4417429cdd 100644 --- a/include/swift/SILOptimizer/Utils/SCCVisitor.h +++ b/include/swift/SILOptimizer/Utils/SCCVisitor.h @@ -123,7 +123,6 @@ class SCCVisitor { case TermKind::SwitchEnumInst: case TermKind::SwitchEnumAddrInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: case TermKind::CheckedCastAddrBranchInst: case TermKind::DynamicMethodBranchInst: assert(Index == 0 && "Expected argument index to always be zero!"); diff --git a/lib/IRGen/IRGenSIL.cpp b/lib/IRGen/IRGenSIL.cpp index dbcd1ece22177..5394c2b40a43e 100644 --- a/lib/IRGen/IRGenSIL.cpp +++ b/lib/IRGen/IRGenSIL.cpp @@ -1331,8 +1331,6 @@ class IRGenSILFunction : void visitObjCToThickMetatypeInst(ObjCToThickMetatypeInst *i); void visitUnconditionalCheckedCastInst(UnconditionalCheckedCastInst *i); void visitUnconditionalCheckedCastAddrInst(UnconditionalCheckedCastAddrInst *i); - void - visitUnconditionalCheckedCastValueInst(UnconditionalCheckedCastValueInst *i); void visitObjCMetatypeToObjectInst(ObjCMetatypeToObjectInst *i); void visitObjCExistentialMetatypeToObjectInst( ObjCExistentialMetatypeToObjectInst *i); @@ -1363,7 +1361,6 @@ class IRGenSILFunction : void visitSwitchEnumAddrInst(SwitchEnumAddrInst *i); void visitDynamicMethodBranchInst(DynamicMethodBranchInst *i); void visitCheckedCastBranchInst(CheckedCastBranchInst *i); - void visitCheckedCastValueBranchInst(CheckedCastValueBranchInst *i); void visitCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *i); void visitGetAsyncContinuationInst(GetAsyncContinuationInst *i); @@ -6370,16 +6367,6 @@ void IRGenSILFunction::visitUnconditionalCheckedCastAddrInst( CheckedCastMode::Unconditional); } -void IRGenSILFunction::visitUnconditionalCheckedCastValueInst( - 
swift::UnconditionalCheckedCastValueInst *i) { - llvm_unreachable("unsupported instruction during IRGen"); -} - -void IRGenSILFunction::visitCheckedCastValueBranchInst( - swift::CheckedCastValueBranchInst *i) { - llvm_unreachable("unsupported instruction during IRGen"); -} - void IRGenSILFunction::visitCheckedCastBranchInst( swift::CheckedCastBranchInst *i) { FailableCastResult castResult; diff --git a/lib/SIL/IR/OperandOwnership.cpp b/lib/SIL/IR/OperandOwnership.cpp index 786669d0b9a98..2d9d3eab3d098 100644 --- a/lib/SIL/IR/OperandOwnership.cpp +++ b/lib/SIL/IR/OperandOwnership.cpp @@ -260,8 +260,6 @@ OPERAND_OWNERSHIP(DestroyingConsume, EndCOWMutation) OPERAND_OWNERSHIP(DestroyingConsume, MoveValue) // Instructions that move an owned value. -OPERAND_OWNERSHIP(ForwardingConsume, CheckedCastValueBranch) -OPERAND_OWNERSHIP(ForwardingConsume, UnconditionalCheckedCastValue) OPERAND_OWNERSHIP(ForwardingConsume, InitExistentialValue) OPERAND_OWNERSHIP(ForwardingConsume, DeinitExistentialValue) OPERAND_OWNERSHIP(ForwardingConsume, MarkUninitialized) diff --git a/lib/SIL/IR/SILArgument.cpp b/lib/SIL/IR/SILArgument.cpp index efedffe41d178..c6c2a675feba3 100644 --- a/lib/SIL/IR/SILArgument.cpp +++ b/lib/SIL/IR/SILArgument.cpp @@ -253,8 +253,6 @@ getSingleTerminatorOperandForPred(const SILBasicBlock *parentBlock, ->getArgForDestBB(parentBlock, argIndex); case TermKind::CheckedCastBranchInst: return cast(predTermInst)->getOperand(); - case TermKind::CheckedCastValueBranchInst: - return cast(predTermInst)->getOperand(); case TermKind::SwitchEnumInst: return cast(predTermInst)->getOperand(); } diff --git a/lib/SIL/IR/SILFunction.cpp b/lib/SIL/IR/SILFunction.cpp index cfe9aa1b4e11f..d5d2072758e54 100644 --- a/lib/SIL/IR/SILFunction.cpp +++ b/lib/SIL/IR/SILFunction.cpp @@ -602,9 +602,6 @@ struct DOTGraphTraits : public DefaultDOTGraphTraits { if (auto *CCBI = dyn_cast(Term)) return (Succ == CCBI->getSuccessBB()) ? 
"T" : "F"; - if (auto *CCBI = dyn_cast(Term)) - return (Succ == CCBI->getSuccessBB()) ? "T" : "F"; - if (auto *CCBI = dyn_cast(Term)) return (Succ == CCBI->getSuccessBB()) ? "T" : "F"; diff --git a/lib/SIL/IR/SILInstruction.cpp b/lib/SIL/IR/SILInstruction.cpp index 0b05765e085e0..5605df2dd87e3 100644 --- a/lib/SIL/IR/SILInstruction.cpp +++ b/lib/SIL/IR/SILInstruction.cpp @@ -1162,7 +1162,6 @@ bool SILInstruction::mayRelease() const { return true; case SILInstructionKind::UnconditionalCheckedCastAddrInst: - case SILInstructionKind::UnconditionalCheckedCastValueInst: case SILInstructionKind::UncheckedOwnershipConversionInst: return true; @@ -1368,7 +1367,6 @@ bool SILInstruction::mayTrap() const { case SILInstructionKind::CondFailInst: case SILInstructionKind::UnconditionalCheckedCastInst: case SILInstructionKind::UnconditionalCheckedCastAddrInst: - case SILInstructionKind::UnconditionalCheckedCastValueInst: return true; default: return false; diff --git a/lib/SIL/IR/SILInstructions.cpp b/lib/SIL/IR/SILInstructions.cpp index 29319d6f01692..634e41901fb11 100644 --- a/lib/SIL/IR/SILInstructions.cpp +++ b/lib/SIL/IR/SILInstructions.cpp @@ -1546,7 +1546,6 @@ bool TermInst::isFunctionExiting() const { case TermKind::SwitchEnumAddrInst: case TermKind::DynamicMethodBranchInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: case TermKind::CheckedCastAddrBranchInst: case TermKind::UnreachableInst: case TermKind::TryApplyInst: @@ -1571,7 +1570,6 @@ bool TermInst::isProgramTerminating() const { case TermKind::SwitchEnumAddrInst: case TermKind::DynamicMethodBranchInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: case TermKind::CheckedCastAddrBranchInst: case TermKind::ReturnInst: case TermKind::ThrowInst: @@ -2279,22 +2277,6 @@ UnconditionalCheckedCastInst *UnconditionalCheckedCastInst::create( forwardingOwnershipKind); } -UnconditionalCheckedCastValueInst *UnconditionalCheckedCastValueInst::create( - 
SILDebugLocation DebugLoc, - SILValue Operand, CanType SrcFormalTy, - SILType DestLoweredTy, CanType DestFormalTy, SILFunction &F) { - SILModule &Mod = F.getModule(); - SmallVector TypeDependentOperands; - collectTypeDependentOperands(TypeDependentOperands, F, DestFormalTy); - unsigned size = - totalSizeToAlloc(1 + TypeDependentOperands.size()); - void *Buffer = - Mod.allocateInst(size, alignof(UnconditionalCheckedCastValueInst)); - return ::new (Buffer) UnconditionalCheckedCastValueInst( - DebugLoc, Operand, SrcFormalTy, TypeDependentOperands, - DestLoweredTy, DestFormalTy); -} - CheckedCastBranchInst *CheckedCastBranchInst::create( SILDebugLocation DebugLoc, bool IsExact, SILValue Operand, SILType DestLoweredTy, CanType DestFormalTy, SILBasicBlock *SuccessBB, @@ -2313,24 +2295,6 @@ CheckedCastBranchInst *CheckedCastBranchInst::create( forwardingOwnershipKind); } -CheckedCastValueBranchInst * -CheckedCastValueBranchInst::create(SILDebugLocation DebugLoc, - SILValue Operand, CanType SrcFormalTy, - SILType DestLoweredTy, CanType DestFormalTy, - SILBasicBlock *SuccessBB, SILBasicBlock *FailureBB, - SILFunction &F) { - SILModule &Mod = F.getModule(); - SmallVector TypeDependentOperands; - collectTypeDependentOperands(TypeDependentOperands, F, DestFormalTy); - unsigned size = - totalSizeToAlloc(1 + TypeDependentOperands.size()); - void *Buffer = Mod.allocateInst(size, alignof(CheckedCastValueBranchInst)); - return ::new (Buffer) CheckedCastValueBranchInst( - DebugLoc, Operand, SrcFormalTy, TypeDependentOperands, - DestLoweredTy, DestFormalTy, - SuccessBB, FailureBB); -} - MetatypeInst *MetatypeInst::create(SILDebugLocation Loc, SILType Ty, SILFunction *F) { SILModule &Mod = F->getModule(); diff --git a/lib/SIL/IR/SILPrinter.cpp b/lib/SIL/IR/SILPrinter.cpp index eab187d728645..a1e497a8f778b 100644 --- a/lib/SIL/IR/SILPrinter.cpp +++ b/lib/SIL/IR/SILPrinter.cpp @@ -1747,25 +1747,12 @@ class SILPrinter : public SILInstructionVisitor { printForwardingOwnershipKind(CI, 
CI->getOperand()); } - void visitCheckedCastValueBranchInst(CheckedCastValueBranchInst *CI) { - *this << CI->getSourceFormalType() << " in " - << getIDAndType(CI->getOperand()) << " to " << CI->getTargetFormalType() - << ", " << Ctx.getID(CI->getSuccessBB()) << ", " - << Ctx.getID(CI->getFailureBB()); - } - void visitUnconditionalCheckedCastAddrInst(UnconditionalCheckedCastAddrInst *CI) { *this << CI->getSourceFormalType() << " in " << getIDAndType(CI->getSrc()) << " to " << CI->getTargetFormalType() << " in " << getIDAndType(CI->getDest()); } - void visitUnconditionalCheckedCastValueInst( - UnconditionalCheckedCastValueInst *CI) { - *this << CI->getSourceFormalType() << " in " << getIDAndType(CI->getOperand()) - << " to " << CI->getTargetFormalType(); - } - void visitCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *CI) { *this << getCastConsumptionKindName(CI->getConsumptionKind()) << ' ' << CI->getSourceFormalType() << " in " << getIDAndType(CI->getSrc()) diff --git a/lib/SIL/IR/ValueOwnership.cpp b/lib/SIL/IR/ValueOwnership.cpp index 42c11d4f00ac4..ac7e4ae7b58d8 100644 --- a/lib/SIL/IR/ValueOwnership.cpp +++ b/lib/SIL/IR/ValueOwnership.cpp @@ -179,7 +179,6 @@ CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, LinearFunctionExtract) // be borrowed sub-objects of the parent CoW box. 
CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, OpenExistentialValue) CONSTANT_OR_NONE_OWNERSHIP_INST(Guaranteed, OpenExistentialBoxValue) -CONSTANT_OR_NONE_OWNERSHIP_INST(Owned, UnconditionalCheckedCastValue) // Given an owned value, mark_uninitialized always forwards an owned value since // we want to make sure that all destroys of that value must come through the diff --git a/lib/SIL/Parser/ParseSIL.cpp b/lib/SIL/Parser/ParseSIL.cpp index d16c45e0d9a27..74df229ce9076 100644 --- a/lib/SIL/Parser/ParseSIL.cpp +++ b/lib/SIL/Parser/ParseSIL.cpp @@ -3849,19 +3849,6 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, InstLoc, SourceAddr, SourceType, DestAddr, TargetType); break; - case SILInstructionKind::UnconditionalCheckedCastValueInst: { - if (parseASTType(SourceType) || parseVerbatim("in") || - parseTypedValueRef(Val, B) || parseVerbatim("to") || - parseASTType(TargetType) || parseSILDebugLocation(InstLoc, B)) - return true; - - auto opaque = Lowering::AbstractionPattern::getOpaque(); - ResultVal = B.createUnconditionalCheckedCastValue( - InstLoc, Val, SourceType, F->getLoweredType(opaque, TargetType), - TargetType); - break; - } - case SILInstructionKind::UnconditionalCheckedCastInst: { if (parseTypedValueRef(Val, B) || parseVerbatim("to") || parseASTType(TargetType)) @@ -3902,21 +3889,6 @@ bool SILParser::parseSpecificSILInstruction(SILBuilder &B, getBBForReference(FailureBBName, FailureBBLoc), forwardingOwnership); break; } - case SILInstructionKind::CheckedCastValueBranchInst: { - if (parseASTType(SourceType) || parseVerbatim("in") - || parseTypedValueRef(Val, B) || parseVerbatim("to") - || parseASTType(TargetType) || parseConditionalBranchDestinations() - || parseSILDebugLocation(InstLoc, B)) - return true; - - auto opaque = Lowering::AbstractionPattern::getOpaque(); - ResultVal = B.createCheckedCastValueBranch( - InstLoc, Val, SourceType, F->getLoweredType(opaque, TargetType), - TargetType, getBBForReference(SuccessBBName, SuccessBBLoc), - 
getBBForReference(FailureBBName, FailureBBLoc)); - break; - } - case SILInstructionKind::MarkUninitializedInst: { if (P.parseToken(tok::l_square, diag::expected_tok_in_sil_instr, "[")) return true; diff --git a/lib/SIL/Utils/BasicBlockUtils.cpp b/lib/SIL/Utils/BasicBlockUtils.cpp index 3a1f46bdd532f..a0a522270cb6a 100644 --- a/lib/SIL/Utils/BasicBlockUtils.cpp +++ b/lib/SIL/Utils/BasicBlockUtils.cpp @@ -214,16 +214,6 @@ void swift::getEdgeArgs(TermInst *T, unsigned edgeIdx, SILBasicBlock *newEdgeBB, succBB->getArgument(0)->getOwnershipKind())); return; } - case SILInstructionKind::CheckedCastValueBranchInst: { - auto CBI = cast(T); - auto succBB = edgeIdx == 0 ? CBI->getSuccessBB() : CBI->getFailureBB(); - if (!succBB->getNumArguments()) - return; - args.push_back(newEdgeBB->createPhiArgument( - succBB->getArgument(0)->getType(), - succBB->getArgument(0)->getOwnershipKind())); - return; - } case SILInstructionKind::TryApplyInst: { auto *TAI = cast(T); diff --git a/lib/SIL/Utils/DynamicCasts.cpp b/lib/SIL/Utils/DynamicCasts.cpp index 72232a123c26b..b2b43d05e56ce 100644 --- a/lib/SIL/Utils/DynamicCasts.cpp +++ b/lib/SIL/Utils/DynamicCasts.cpp @@ -1205,6 +1205,11 @@ bool swift::emitSuccessfulIndirectUnconditionalCast( /// Can the given cast be performed by the scalar checked-cast /// instructions? +/// +/// TODO: in OSSA-with-opaque-values SIL, all casts could be modeled using +/// scalar casts by setting 'OwnershipForwardingMixin::directlyForwards = +/// false'. This would simplify SIL analysis. Temporaries would be emitted +/// during address lowering. 
bool swift::canUseScalarCheckedCastInstructions(SILModule &M, CanType sourceFormalType, CanType targetFormalType) { diff --git a/lib/SIL/Utils/InstructionUtils.cpp b/lib/SIL/Utils/InstructionUtils.cpp index d445966c27e37..25733595d0e68 100644 --- a/lib/SIL/Utils/InstructionUtils.cpp +++ b/lib/SIL/Utils/InstructionUtils.cpp @@ -569,7 +569,6 @@ RuntimeEffect swift::getRuntimeEffect(SILInstruction *inst, SILType &impactType) //return RuntimeEffect::NoEffect; } - case SILInstructionKind::UnconditionalCheckedCastValueInst: case SILInstructionKind::UnconditionalCheckedCastInst: impactType = inst->getOperand(0)->getType(); return RuntimeEffect::Casting | metadataEffect(impactType) | @@ -584,10 +583,6 @@ RuntimeEffect swift::getRuntimeEffect(SILInstruction *inst, SILType &impactType) impactType = inst->getOperand(0)->getType(); return RuntimeEffect::Casting | metadataEffect(impactType) | metadataEffect(cast(inst)->getTargetLoweredType()); - case SILInstructionKind::CheckedCastValueBranchInst: - impactType = inst->getOperand(0)->getType(); - return RuntimeEffect::Casting | metadataEffect(impactType) | - metadataEffect(cast(inst)->getTargetLoweredType()); case SILInstructionKind::AllocStackInst: case SILInstructionKind::ProjectBoxInst: diff --git a/lib/SIL/Utils/MemAccessUtils.cpp b/lib/SIL/Utils/MemAccessUtils.cpp index 438ed3a56ff5b..d7f3013b27596 100644 --- a/lib/SIL/Utils/MemAccessUtils.cpp +++ b/lib/SIL/Utils/MemAccessUtils.cpp @@ -759,7 +759,6 @@ bool swift::isIdentityAndOwnershipPreservingRefCast( case SILInstructionKind::BridgeObjectToRefInst: return true; case SILInstructionKind::UnconditionalCheckedCastInst: - case SILInstructionKind::UnconditionalCheckedCastValueInst: return SILDynamicCastInst(svi).isRCIdentityPreserving(); // Ignore markers case SILInstructionKind::MarkUninitializedInst: @@ -2620,7 +2619,6 @@ void swift::visitAccessedAddress(SILInstruction *I, case SILInstructionKind::BeginUnpairedAccessInst: case SILInstructionKind::BindMemoryInst: case 
SILInstructionKind::RebindMemoryInst: - case SILInstructionKind::CheckedCastValueBranchInst: case SILInstructionKind::CondFailInst: case SILInstructionKind::CopyBlockInst: case SILInstructionKind::CopyBlockWithoutEscapingInst: @@ -2652,7 +2650,6 @@ void swift::visitAccessedAddress(SILInstruction *I, case SILInstructionKind::UncheckedOwnershipConversionInst: case SILInstructionKind::UncheckedRefCastAddrInst: case SILInstructionKind::UnconditionalCheckedCastAddrInst: - case SILInstructionKind::UnconditionalCheckedCastValueInst: case SILInstructionKind::ValueMetatypeInst: // TODO: Is this correct? case SILInstructionKind::GetAsyncContinuationInst: diff --git a/lib/SIL/Verifier/SILVerifier.cpp b/lib/SIL/Verifier/SILVerifier.cpp index 8863f2ebd4c59..d36cc258727d0 100644 --- a/lib/SIL/Verifier/SILVerifier.cpp +++ b/lib/SIL/Verifier/SILVerifier.cpp @@ -4025,13 +4025,6 @@ class SILVerifier : public SILVerifierBase { verifyOpenedArchetype(CI, CI->getType().getASTType()); } - void checkUnconditionalCheckedCastValueInst( - UnconditionalCheckedCastValueInst *CI) { - verifyCheckedCast(/*exact*/ false, CI->getOperand()->getType(), - CI->getType(), true); - verifyOpenedArchetype(CI, CI->getType().getASTType()); - } - // Make sure that opcodes handled by isRCIdentityPreservingCast cannot cast // from a trivial to a reference type. Such a cast may dynamically // instantiate a new reference-counted object. 
@@ -4135,25 +4128,6 @@ class SILVerifier : public SILVerifierBase { } } - void checkCheckedCastValueBranchInst(CheckedCastValueBranchInst *CBI) { - verifyCheckedCast(false, - CBI->getSourceLoweredType(), - CBI->getTargetLoweredType(), - true); - verifyOpenedArchetype(CBI, CBI->getTargetFormalType()); - - require(CBI->getSuccessBB()->args_size() == 1, - "success dest of checked_cast_value_br must take one argument"); - requireSameType( - CBI->getSuccessBB()->args_begin()[0]->getType(), - CBI->getTargetLoweredType(), - "success dest block argument of checked_cast_value_br must match " - "type of cast"); - require(F.hasOwnership() || CBI->getFailureBB()->args_empty(), - "failure dest of checked_cast_value_br in unqualified ownership " - "sil must take no arguments"); - } - void checkCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *CCABI) { require(CCABI->getSrc()->getType().isAddress(), "checked_cast_addr_br src must be an address"); diff --git a/lib/SILGen/SILGenBuilder.cpp b/lib/SILGen/SILGenBuilder.cpp index 780e9e55033df..81ed188bdfee7 100644 --- a/lib/SILGen/SILGenBuilder.cpp +++ b/lib/SILGen/SILGenBuilder.cpp @@ -408,7 +408,8 @@ ManagedValue SILGenBuilder::createLoadTake(SILLocation loc, ManagedValue v, lowering.emitLoadOfCopy(*this, loc, v.forward(SGF), IsTake); if (lowering.isTrivial()) return ManagedValue::forUnmanaged(result); - assert(!lowering.isAddressOnly() && "cannot retain an unloadable type"); + assert((!lowering.isAddressOnly() || !SGF.silConv.useLoweredAddresses()) && + "cannot retain an unloadable type"); return SGF.emitManagedRValueWithCleanup(result, lowering); } @@ -509,16 +510,6 @@ ManagedValue SILGenBuilder::createEnum(SILLocation loc, ManagedValue payload, return SGF.emitManagedRValueWithCleanup(result); } -ManagedValue SILGenBuilder::createUnconditionalCheckedCastValue( - SILLocation loc, ManagedValue op, CanType srcFormalTy, - SILType destLoweredTy, CanType destFormalTy) { - SILValue result = - createUnconditionalCheckedCastValue(loc, 
op.forward(SGF), - srcFormalTy, destLoweredTy, - destFormalTy); - return SGF.emitManagedRValueWithCleanup(result); -} - ManagedValue SILGenBuilder::createUnconditionalCheckedCast( SILLocation loc, ManagedValue op, SILType destLoweredTy, CanType destFormalTy) { @@ -548,18 +539,6 @@ void SILGenBuilder::createCheckedCastBranch(SILLocation loc, bool isExact, Target1Count, Target2Count); } -void SILGenBuilder::createCheckedCastValueBranch(SILLocation loc, - ManagedValue op, - CanType srcFormalTy, - SILType destLoweredTy, - CanType destFormalTy, - SILBasicBlock *trueBlock, - SILBasicBlock *falseBlock) { - createCheckedCastValueBranch(loc, op.forward(SGF), srcFormalTy, - destLoweredTy, destFormalTy, - trueBlock, falseBlock); -} - ManagedValue SILGenBuilder::createUpcast(SILLocation loc, ManagedValue original, SILType type) { CleanupCloner cloner(*this, original); diff --git a/lib/SILGen/SILGenBuilder.h b/lib/SILGen/SILGenBuilder.h index 9b1dc72fa0a3b..6c2c255a7decd 100644 --- a/lib/SILGen/SILGenBuilder.h +++ b/lib/SILGen/SILGenBuilder.h @@ -248,13 +248,6 @@ class SILGenBuilder : public SILBuilder { const TypeLowering &lowering, SGFContext context, llvm::function_ref rvalueEmitter); - using SILBuilder::createUnconditionalCheckedCastValue; - ManagedValue - createUnconditionalCheckedCastValue(SILLocation loc, - ManagedValue op, - CanType srcFormalTy, - SILType destLoweredTy, - CanType destFormalTy); using SILBuilder::createUnconditionalCheckedCast; ManagedValue createUnconditionalCheckedCast(SILLocation loc, ManagedValue op, @@ -271,15 +264,6 @@ class SILGenBuilder : public SILBuilder { ProfileCounter Target1Count, ProfileCounter Target2Count); - using SILBuilder::createCheckedCastValueBranch; - void createCheckedCastValueBranch(SILLocation loc, - ManagedValue op, - CanType srcFormalTy, - SILType destLoweredTy, - CanType destFormalTy, - SILBasicBlock *trueBlock, - SILBasicBlock *falseBlock); - using SILBuilder::createUpcast; ManagedValue createUpcast(SILLocation loc, 
ManagedValue original, SILType type); diff --git a/lib/SILGen/SILGenDynamicCast.cpp b/lib/SILGen/SILGenDynamicCast.cpp index 1db750a48cff6..13bf42c10ce9f 100644 --- a/lib/SILGen/SILGenDynamicCast.cpp +++ b/lib/SILGen/SILGenDynamicCast.cpp @@ -55,7 +55,7 @@ namespace { SGFContext ctx; std::unique_ptr temporary; - if (isOperandIndirect() && SGF.silConv.useLoweredAddresses()) { + if (isOperandIndirect()) { temporary = SGF.emitTemporary(Loc, origSourceTL); ctx = SGFContext(temporary.get()); } @@ -63,7 +63,7 @@ namespace { auto result = SGF.emitRValueAsOrig(operand, mostGeneral, origSourceTL, ctx); - if (isOperandIndirect() && SGF.silConv.useLoweredAddresses()) { + if (isOperandIndirect()) { // Force the result into the temporary if it's not already there. if (!result.isInContext()) { result.forwardInto(SGF, Loc, temporary->getAddress()); @@ -86,8 +86,7 @@ namespace { // If we're using checked_cast_addr, take the operand (which // should be an address) and build into the destination buffer. - if (Strategy == CastStrategy::Address && - SGF.silConv.useLoweredAddresses()) { + if (Strategy == CastStrategy::Address) { SILValue resultBuffer = createAbstractResultBuffer(hasAbstraction, origTargetTL, ctx); SGF.B.createUnconditionalCheckedCastAddr(Loc, @@ -98,17 +97,10 @@ namespace { abstraction, origTargetTL, ctx)); } - ManagedValue result; - if (Strategy == CastStrategy::Address) { - result = SGF.B.createUnconditionalCheckedCastValue( - Loc, operand, SourceType, - origTargetTL.getLoweredType(), TargetType); - } else { - result = SGF.B.createUnconditionalCheckedCast( - Loc, operand, - origTargetTL.getLoweredType(), TargetType); - } - + ManagedValue result = + SGF.B.createUnconditionalCheckedCast(Loc, operand, + origTargetTL.getLoweredType(), + TargetType); return RValue(SGF, Loc, TargetType, finishFromResultScalar(hasAbstraction, result, CastConsumptionKind::TakeAlways, @@ -137,21 +129,13 @@ namespace { // Emit the branch. 
ManagedValue operandValue; SILValue resultBuffer; - if (Strategy == CastStrategy::Address && - SGF.silConv.useLoweredAddresses()) { + if (Strategy == CastStrategy::Address) { assert(operand.getType().isAddress()); resultBuffer = createAbstractResultBuffer(hasAbstraction, origTargetTL, ctx); SGF.B.createCheckedCastAddrBranch( Loc, consumption, operand.forward(SGF), SourceType, resultBuffer, TargetType, trueBB, falseBB, TrueCount, FalseCount); - } else if (Strategy == CastStrategy::Address) { - // Opaque value mode - operandValue = std::move(operand); - SGF.B.createCheckedCastValueBranch( - Loc, operandValue, SourceType, - origTargetTL.getLoweredType(), TargetType, - trueBB, falseBB); } else { // Tolerate being passed an address here. It comes up during switch // emission. @@ -176,8 +160,7 @@ namespace { FullExpr scope(SGF.Cleanups, CleanupLocation(Loc)); ManagedValue result; - if (Strategy == CastStrategy::Address && - SGF.silConv.useLoweredAddresses()) { + if (Strategy == CastStrategy::Address) { result = finishFromResultBuffer(hasAbstraction, resultBuffer, abstraction, origTargetTL, ctx); } else { @@ -386,11 +369,8 @@ adjustForConditionalCheckedCastOperand(SILLocation loc, ManagedValue src, bool hasAbstraction = (src.getType() != srcAbstractTL.getLoweredType()); // Fast path: no re-abstraction required. 
- if (!hasAbstraction && - (!requiresAddress || - (src.getType().isAddress() || !SGF.silConv.useLoweredAddresses()))) { + if (!hasAbstraction && (!requiresAddress || src.getType().isAddress())) return src; - } std::unique_ptr init; if (requiresAddress) { @@ -470,8 +450,9 @@ RValue Lowering::emitConditionalCheckedCast( SILValue resultObjectBuffer; Optional resultObjectTemp; SGFContext resultObjectCtx; - if ((resultTL.isAddressOnly() && SGF.silConv.useLoweredAddresses()) || - (C.getEmitInto() && C.getEmitInto()->canPerformInPlaceInitialization())) { + if ((resultTL.isAddressOnly()) + || (C.getEmitInto() + && C.getEmitInto()->canPerformInPlaceInitialization())) { SILType resultTy = resultTL.getLoweredType(); resultBuffer = SGF.getBufferForExprResult(loc, resultTy, C); resultObjectBuffer = SGF.B.createInitEnumDataAddr( diff --git a/lib/SILGen/SILGenLazyConformance.cpp b/lib/SILGen/SILGenLazyConformance.cpp index 227672b578d95..a50be17715fa9 100644 --- a/lib/SILGen/SILGenLazyConformance.cpp +++ b/lib/SILGen/SILGenLazyConformance.cpp @@ -183,13 +183,6 @@ class LazyConformanceEmitter : public SILInstructionVisitorgetTargetFormalType()); } - void visitCheckedCastValueBranchInst(CheckedCastValueBranchInst *CCVBI) { - SGM.useConformancesFromType(CCVBI->getSourceFormalType()); - SGM.useConformancesFromType(CCVBI->getTargetFormalType()); - SGM.useConformancesFromObjectiveCType(CCVBI->getSourceFormalType()); - SGM.useConformancesFromObjectiveCType(CCVBI->getTargetFormalType()); - } - void visitCopyAddrInst(CopyAddrInst *CAI) { SGM.useConformancesFromType(CAI->getSrc()->getType().getASTType()); SGM.useConformancesFromType(CAI->getDest()->getType().getASTType()); diff --git a/lib/SILOptimizer/ARC/ARCSequenceOptUtils.cpp b/lib/SILOptimizer/ARC/ARCSequenceOptUtils.cpp index 62c501669051b..e29e1ea47ac16 100644 --- a/lib/SILOptimizer/ARC/ARCSequenceOptUtils.cpp +++ b/lib/SILOptimizer/ARC/ARCSequenceOptUtils.cpp @@ -38,7 +38,6 @@ bool isARCSignificantTerminator(TermInst *TI) { case 
TermKind::SwitchEnumAddrInst: case TermKind::DynamicMethodBranchInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: case TermKind::CheckedCastAddrBranchInst: return true; } diff --git a/lib/SILOptimizer/Differentiation/VJPCloner.cpp b/lib/SILOptimizer/Differentiation/VJPCloner.cpp index 2e07f82ef1767..5a274a32b4c3f 100644 --- a/lib/SILOptimizer/Differentiation/VJPCloner.cpp +++ b/lib/SILOptimizer/Differentiation/VJPCloner.cpp @@ -377,20 +377,6 @@ class VJPCloner::Implementation final ccbi->getTrueBBCount(), ccbi->getFalseBBCount()); } - void visitCheckedCastValueBranchInst(CheckedCastValueBranchInst *ccvbi) { - Builder.setCurrentDebugScope(getOpScope(ccvbi->getDebugScope())); - // Build pullback struct value for original block. - auto *pbStructVal = buildPullbackValueStructValue(ccvbi); - // Create a new `checked_cast_value_branch` instruction. - getBuilder().createCheckedCastValueBranch( - ccvbi->getLoc(), getOpValue(ccvbi->getOperand()), - getOpASTType(ccvbi->getSourceFormalType()), - getOpType(ccvbi->getTargetLoweredType()), - getOpASTType(ccvbi->getTargetFormalType()), - createTrampolineBasicBlock(ccvbi, pbStructVal, ccvbi->getSuccessBB()), - createTrampolineBasicBlock(ccvbi, pbStructVal, ccvbi->getFailureBB())); - } - void visitCheckedCastAddrBranchInst(CheckedCastAddrBranchInst *ccabi) { Builder.setCurrentDebugScope(getOpScope(ccabi->getDebugScope())); // Build pullback struct value for original block. diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index f82a66fce22da..1c72876d762d9 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -808,7 +808,7 @@ static Operand *getReusedStorageOperand(SILValue value) { /// user's storage. The user may compose an aggregate from its operands or /// forwards its operands to arguments. /// -/// TODO: Handle SwitchValueInst, CheckedCastValueBranchInst. 
+/// TODO: Handle SwitchValueInst static SILValue getProjectedUseValue(Operand *operand) { auto *user = operand->getUser(); switch (user->getKind()) { @@ -2418,13 +2418,6 @@ class UseRewriter : SILInstructionVisitor { use->set(SILUndef::get(use->get()->getType(), *pass.function)); } - // Opaque checked cast source. - void visitCheckedCastValueBranchInst( - CheckedCastValueBranchInst *checkedBranchInst) { - // FIXME: Unimplemented - llvm::report_fatal_error("Unimplemented CheckCastValueBranch use."); - } - // Copy from an opaque source operand. void visitCopyValueInst(CopyValueInst *copyInst) { SILValue srcVal = copyInst->getOperand(); @@ -2540,13 +2533,6 @@ class UseRewriter : SILInstructionVisitor { } void visitUncheckedEnumDataInst(UncheckedEnumDataInst *enumDataInst); - - void visitUnconditionalCheckedCastValueInst( - UnconditionalCheckedCastValueInst *checkedCastInst) { - - // FIXME: Unimplemented - llvm::report_fatal_error("Unimplemented UnconditionalCheckedCast use."); - } }; } // end anonymous namespace diff --git a/lib/SILOptimizer/Mandatory/DiagnoseInfiniteRecursion.cpp b/lib/SILOptimizer/Mandatory/DiagnoseInfiniteRecursion.cpp index d1985258f5d44..2c853ee9e98b2 100644 --- a/lib/SILOptimizer/Mandatory/DiagnoseInfiniteRecursion.cpp +++ b/lib/SILOptimizer/Mandatory/DiagnoseInfiniteRecursion.cpp @@ -225,8 +225,7 @@ class Invariants { case TermKind::CondBranchInst: case TermKind::SwitchValueInst: case TermKind::SwitchEnumInst: - case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: { + case TermKind::CheckedCastBranchInst: { SmallPtrSet visited; return isInvariantValue(term->getOperand(0), visited); } diff --git a/lib/SILOptimizer/Mandatory/Differentiation.cpp b/lib/SILOptimizer/Mandatory/Differentiation.cpp index f7f6579f9404a..572659e6e2b81 100644 --- a/lib/SILOptimizer/Mandatory/Differentiation.cpp +++ b/lib/SILOptimizer/Mandatory/Differentiation.cpp @@ -171,7 +171,6 @@ static bool diagnoseUnsupportedControlFlow(ADContext 
&context, if (isa(term) || isa(term) || isa(term) || isa(term) || isa(term) || - isa(term) || isa(term) || isa(term)) continue; // If terminator is an unsupported branching terminator, emit an error. diff --git a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp index 52f236bb01adf..51b4fc66e6d36 100644 --- a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp +++ b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp @@ -2768,8 +2768,7 @@ bool AllocOptimize::tryToRemoveDeadAllocation() { case TermKind::DynamicMethodBranchInst: case TermKind::AwaitAsyncContinuationInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastAddrBranchInst: - case TermKind::CheckedCastValueBranchInst: { + case TermKind::CheckedCastAddrBranchInst: { // Otherwise, we insert the destroy_addr /after/ the // terminator. All of these are guaranteed to have each successor // to have the block as its only predecessor block. diff --git a/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp b/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp index 55b0eb266747b..7fafb8b91be9c 100644 --- a/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp +++ b/lib/SILOptimizer/Transforms/DeadCodeElimination.cpp @@ -367,7 +367,6 @@ void DCE::markTerminatorArgsLive(SILBasicBlock *Pred, case TermKind::DynamicMethodBranchInst: case TermKind::SwitchEnumInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: assert(ArgIndex == 0 && "Expected a single argument!"); // We do not need to do anything with these. 
If the resulting @@ -472,7 +471,6 @@ void DCE::propagateLiveness(SILInstruction *I) { case TermKind::AwaitAsyncContinuationInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: case TermKind::CheckedCastAddrBranchInst: case TermKind::TryApplyInst: case TermKind::SwitchValueInst: diff --git a/lib/SILOptimizer/Transforms/SimplifyCFG.cpp b/lib/SILOptimizer/Transforms/SimplifyCFG.cpp index 7f88de028524f..5d2ad690911c8 100644 --- a/lib/SILOptimizer/Transforms/SimplifyCFG.cpp +++ b/lib/SILOptimizer/Transforms/SimplifyCFG.cpp @@ -239,7 +239,6 @@ class SimplifyCFG { bool simplifyBranchBlock(BranchInst *BI); bool simplifyCondBrBlock(CondBranchInst *BI); bool simplifyCheckedCastBranchBlock(CheckedCastBranchInst *CCBI); - bool simplifyCheckedCastValueBranchBlock(CheckedCastValueBranchInst *CCBI); bool simplifyCheckedCastAddrBranchBlock(CheckedCastAddrBranchInst *CCABI); bool simplifyTryApplyBlock(TryApplyInst *TAI); bool simplifySwitchValueBlock(SwitchValueInst *SVI); @@ -2471,49 +2470,6 @@ bool SimplifyCFG::simplifyCheckedCastBranchBlock(CheckedCastBranchInst *CCBI) { return MadeChange; } -bool SimplifyCFG::simplifyCheckedCastValueBranchBlock( - CheckedCastValueBranchInst *CCBI) { - // TODO: OSSA; handle cleanups for opaque cases (simplify_cfg_opaque.sil). 
- if (!EnableOSSARewriteTerminator && Fn.hasOwnership()) { - return false; - } - - auto SuccessBB = CCBI->getSuccessBB(); - auto FailureBB = CCBI->getFailureBB(); - auto ThisBB = CCBI->getParent(); - - bool MadeChange = false; - CastOptimizer CastOpt( - FuncBuilder, nullptr /*SILBuilderContext*/, - /* replaceValueUsesAction */ - [&MadeChange](SILValue oldValue, SILValue newValue) { - MadeChange = true; - }, - /* replaceInstUsesAction */ - [&MadeChange](SILInstruction *I, ValueBase *V) { MadeChange = true; }, - /* eraseInstAction */ - [&MadeChange](SILInstruction *I) { - MadeChange = true; - I->eraseFromParent(); - }, - /* willSucceedAction */ - [&]() { - MadeChange |= removeIfDead(FailureBB); - addToWorklist(ThisBB); - }, - /* willFailAction */ - [&]() { - MadeChange |= removeIfDead(SuccessBB); - addToWorklist(ThisBB); - }); - - MadeChange |= bool(CastOpt.simplifyCheckedCastValueBranchInst(CCBI)); - - LLVM_DEBUG(if (MadeChange) - llvm::dbgs() << "simplify checked_cast_value block\n"); - return MadeChange; -} - bool SimplifyCFG:: simplifyCheckedCastAddrBranchBlock(CheckedCastAddrBranchInst *CCABI) { @@ -2908,10 +2864,6 @@ bool SimplifyCFG::simplifyBlocks() { case TermKind::CheckedCastBranchInst: Changed |= simplifyCheckedCastBranchBlock(cast(TI)); break; - case TermKind::CheckedCastValueBranchInst: - Changed |= simplifyCheckedCastValueBranchBlock( - cast(TI)); - break; case TermKind::CheckedCastAddrBranchInst: Changed |= simplifyCheckedCastAddrBranchBlock(cast(TI)); break; diff --git a/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp b/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp index 6bfcc4fb7943e..67e6d47204b0e 100644 --- a/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp +++ b/lib/SILOptimizer/UtilityPasses/SerializeSILPass.cpp @@ -195,7 +195,6 @@ static bool hasOpaqueArchetype(TypeExpansionContext context, case SILInstructionKind::ObjCToThickMetatypeInst: case SILInstructionKind::ObjCMetatypeToObjectInst: case 
SILInstructionKind::ObjCExistentialMetatypeToObjectInst: - case SILInstructionKind::UnconditionalCheckedCastValueInst: case SILInstructionKind::UnconditionalCheckedCastInst: case SILInstructionKind::ClassifyBridgeObjectInst: case SILInstructionKind::ValueToBridgeObjectInst: @@ -275,7 +274,6 @@ static bool hasOpaqueArchetype(TypeExpansionContext context, case SILInstructionKind::DynamicMethodBranchInst: case SILInstructionKind::CheckedCastBranchInst: case SILInstructionKind::CheckedCastAddrBranchInst: - case SILInstructionKind::CheckedCastValueBranchInst: case SILInstructionKind::DeallocStackInst: case SILInstructionKind::DeallocStackRefInst: case SILInstructionKind::DeallocRefInst: diff --git a/lib/SILOptimizer/Utils/CFGOptUtils.cpp b/lib/SILOptimizer/Utils/CFGOptUtils.cpp index 3a01c94a3a572..aaca172bc27dc 100644 --- a/lib/SILOptimizer/Utils/CFGOptUtils.cpp +++ b/lib/SILOptimizer/Utils/CFGOptUtils.cpp @@ -380,22 +380,6 @@ void swift::replaceBranchTarget(TermInst *t, SILBasicBlock *oldDest, return; } - case TermKind::CheckedCastValueBranchInst: { - auto cbi = cast(t); - assert(oldDest == cbi->getSuccessBB() - || oldDest == cbi->getFailureBB() && "Invalid edge index"); - auto successBB = - oldDest == cbi->getSuccessBB() ? newDest : cbi->getSuccessBB(); - auto failureBB = - oldDest == cbi->getFailureBB() ? 
newDest : cbi->getFailureBB(); - builder.createCheckedCastValueBranch( - cbi->getLoc(), cbi->getOperand(), cbi->getSourceFormalType(), - cbi->getTargetLoweredType(), cbi->getTargetFormalType(), - successBB, failureBB); - cbi->eraseFromParent(); - return; - } - case TermKind::CheckedCastAddrBranchInst: { auto cbi = cast(t); assert(oldDest == cbi->getSuccessBB() @@ -738,7 +722,6 @@ static bool isSafeNonExitTerminator(TermInst *ti) { case TermKind::SwitchEnumAddrInst: case TermKind::DynamicMethodBranchInst: case TermKind::CheckedCastBranchInst: - case TermKind::CheckedCastValueBranchInst: case TermKind::CheckedCastAddrBranchInst: return true; case TermKind::UnreachableInst: diff --git a/lib/SILOptimizer/Utils/CastOptimizer.cpp b/lib/SILOptimizer/Utils/CastOptimizer.cpp index 1d8fbc5c5732e..0f2109a0023a7 100644 --- a/lib/SILOptimizer/Utils/CastOptimizer.cpp +++ b/lib/SILOptimizer/Utils/CastOptimizer.cpp @@ -1085,87 +1085,6 @@ CastOptimizer::simplifyCheckedCastBranchInst(CheckedCastBranchInst *Inst) { return NewI; } -SILInstruction *CastOptimizer::simplifyCheckedCastValueBranchInst( - CheckedCastValueBranchInst *Inst) { - if (auto *I = optimizeCheckedCastValueBranchInst(Inst)) - Inst = dyn_cast(I); - - if (!Inst) - return nullptr; - - SILDynamicCastInst dynamicCast(Inst); - auto SourceFormalType = dynamicCast.getSourceFormalType(); - auto TargetLoweredType = dynamicCast.getTargetLoweredType(); - auto TargetFormalType = dynamicCast.getTargetFormalType(); - auto Loc = dynamicCast.getLocation(); - auto *SuccessBB = dynamicCast.getSuccessBlock(); - auto *FailureBB = dynamicCast.getFailureBlock(); - auto Op = dynamicCast.getSource(); - auto *F = dynamicCast.getFunction(); - - // Check if we can statically predict the outcome of the cast. 
- auto Feasibility = dynamicCast.classifyFeasibility(false /*allow wmo opts*/); - - SILBuilderWithScope Builder(Inst, builderContext); - - if (Feasibility == DynamicCastFeasibility::WillFail) { - auto *NewI = Builder.createBranch(Loc, FailureBB); - eraseInstAction(Inst); - willFailAction(); - return NewI; - } - - // Casting will succeed. - - bool ResultNotUsed = SuccessBB->getArgument(0)->use_empty(); - SILValue CastedValue; - if (Op->getType() != TargetLoweredType) { - // Apply the bridged cast optimizations. - // TODO: Bridged casts cannot be expressed by checked_cast_value_br yet. - // Once the support for opaque values has landed, please review this - // code. - auto *BridgedI = optimizeBridgedCasts(dynamicCast); - if (BridgedI) { - llvm_unreachable( - "Bridged casts cannot be expressed by checked_cast_value_br yet"); - } else { - // If the cast may succeed or fail and can't be turned into a bridging - // call, then let it be. - if (Feasibility == DynamicCastFeasibility::MaySucceed) { - return nullptr; - } - - assert(Feasibility == DynamicCastFeasibility::WillSucceed); - - // Replace by unconditional_cast, followed by a branch. - // The unconditional_cast can be skipped, if the result of a cast - // is not used afterwards. - - if (!dynamicCast.canUseScalarCheckedCastInstructions()) - return nullptr; - - if (!ResultNotUsed) { - CastedValue = - emitSuccessfulScalarUnconditionalCast(Builder, Loc, dynamicCast); - } else { - CastedValue = SILUndef::get(TargetLoweredType, *F); - } - } - if (!CastedValue) - CastedValue = Builder.createUnconditionalCheckedCastValue( - Loc, Op, SourceFormalType, - TargetLoweredType, TargetFormalType); - } else { - // No need to cast. 
- CastedValue = Op; - } - - auto *NewI = Builder.createBranch(Loc, SuccessBB, CastedValue); - eraseInstAction(Inst); - willSucceedAction(); - return NewI; -} - SILInstruction *CastOptimizer::optimizeCheckedCastAddrBranchInst( CheckedCastAddrBranchInst *Inst) { auto Loc = Inst->getLoc(); @@ -1251,12 +1170,6 @@ SILInstruction *CastOptimizer::optimizeCheckedCastAddrBranchInst( return nullptr; } -SILInstruction *CastOptimizer::optimizeCheckedCastValueBranchInst( - CheckedCastValueBranchInst *Inst) { - // TODO - return nullptr; -} - SILInstruction * CastOptimizer::optimizeCheckedCastBranchInst(CheckedCastBranchInst *Inst) { if (Inst->isExact()) diff --git a/lib/SILOptimizer/Utils/SILInliner.cpp b/lib/SILOptimizer/Utils/SILInliner.cpp index 01e9bc5497aaa..4e674246d7906 100644 --- a/lib/SILOptimizer/Utils/SILInliner.cpp +++ b/lib/SILOptimizer/Utils/SILInliner.cpp @@ -876,7 +876,6 @@ InlineCost swift::instructionInlineCost(SILInstruction &I) { case SILInstructionKind::AssignInst: case SILInstructionKind::AssignByWrapperInst: case SILInstructionKind::CheckedCastBranchInst: - case SILInstructionKind::CheckedCastValueBranchInst: case SILInstructionKind::CheckedCastAddrBranchInst: case SILInstructionKind::ClassMethodInst: case SILInstructionKind::ObjCMethodInst: @@ -946,7 +945,6 @@ InlineCost swift::instructionInlineCost(SILInstruction &I) { case SILInstructionKind::UncheckedTakeEnumDataAddrInst: case SILInstructionKind::UnconditionalCheckedCastInst: case SILInstructionKind::UnconditionalCheckedCastAddrInst: - case SILInstructionKind::UnconditionalCheckedCastValueInst: case SILInstructionKind::IsEscapingClosureInst: case SILInstructionKind::IsUniqueInst: case SILInstructionKind::BeginCOWMutationInst: diff --git a/lib/Serialization/DeserializeSIL.cpp b/lib/Serialization/DeserializeSIL.cpp index 5a9f105096954..f3771114907f0 100644 --- a/lib/Serialization/DeserializeSIL.cpp +++ b/lib/Serialization/DeserializeSIL.cpp @@ -2623,36 +2623,6 @@ bool 
SILDeserializer::readSILInstruction(SILFunction *Fn, forwardingOwnership); break; } - case SILInstructionKind::CheckedCastValueBranchInst: { - CanType srcFormalType = MF->getType(ListOfValues[0])->getCanonicalType(); - SILType srcLoweredType = getSILType(MF->getType(ListOfValues[2]), - (SILValueCategory)ListOfValues[3], Fn); - SILValue op = getLocalValue(ListOfValues[1], srcLoweredType); - SILType targetLoweredType = - getSILType(MF->getType(TyID), (SILValueCategory)TyCategory, Fn); - CanType targetFormalType = - MF->getType(ListOfValues[4])->getCanonicalType(); - auto *successBB = getBBForReference(Fn, ListOfValues[5]); - auto *failureBB = getBBForReference(Fn, ListOfValues[6]); - - ResultInst = Builder.createCheckedCastValueBranch( - Loc, op, srcFormalType, targetLoweredType, targetFormalType, successBB, - failureBB); - break; - } - case SILInstructionKind::UnconditionalCheckedCastValueInst: { - CanType srcFormalType = MF->getType(ListOfValues[0])->getCanonicalType(); - SILType srcLoweredType = getSILType(MF->getType(ListOfValues[2]), - (SILValueCategory)ListOfValues[3], Fn); - SILValue src = getLocalValue(ListOfValues[1], srcLoweredType); - - SILType targetLoweredType = - getSILType(MF->getType(TyID), (SILValueCategory)TyCategory, Fn); - CanType targetFormalType = MF->getType(ListOfValues[4])->getCanonicalType(); - ResultInst = Builder.createUnconditionalCheckedCastValue( - Loc, src, srcFormalType, targetLoweredType, targetFormalType); - break; - } case SILInstructionKind::UnconditionalCheckedCastAddrInst: { // ignore attr. 
CanType srcFormalType = MF->getType(ListOfValues[0])->getCanonicalType(); diff --git a/lib/Serialization/SerializeSIL.cpp b/lib/Serialization/SerializeSIL.cpp index 6dd3be56ef52a..2da03dcca8e26 100644 --- a/lib/Serialization/SerializeSIL.cpp +++ b/lib/Serialization/SerializeSIL.cpp @@ -1810,22 +1810,6 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { llvm::makeArrayRef(listOfValues)); break; } - case SILInstructionKind::UnconditionalCheckedCastValueInst: { - auto CI = cast(&SI); - ValueID listOfValues[] = { - S.addTypeRef(CI->getSourceFormalType()), - addValueRef(CI->getOperand()), - S.addTypeRef(CI->getSourceLoweredType().getASTType()), - (unsigned)CI->getSourceLoweredType().getCategory(), - S.addTypeRef(CI->getTargetFormalType()) - }; - SILOneTypeValuesLayout::emitRecord(Out, ScratchRecord, - SILAbbrCodes[SILOneTypeValuesLayout::Code], (unsigned)SI.getKind(), - S.addTypeRef(CI->getTargetLoweredType().getASTType()), - (unsigned)CI->getTargetLoweredType().getCategory(), - llvm::makeArrayRef(listOfValues)); - break; - } case SILInstructionKind::UncheckedRefCastAddrInst: { auto CI = cast(&SI); ValueID listOfValues[] = { @@ -2262,27 +2246,6 @@ void SILSerializer::writeSILInstruction(const SILInstruction &SI) { llvm::makeArrayRef(listOfValues)); break; } - case SILInstructionKind::CheckedCastValueBranchInst: { - const CheckedCastValueBranchInst *CBI = - cast(&SI); - ValueID listOfValues[] = { - S.addTypeRef(CBI->getSourceFormalType()), - addValueRef(CBI->getOperand()), - S.addTypeRef(CBI->getSourceLoweredType().getASTType()), - (unsigned)CBI->getSourceLoweredType().getCategory(), - S.addTypeRef(CBI->getTargetFormalType()), - BasicBlockMap[CBI->getSuccessBB()], - BasicBlockMap[CBI->getFailureBB()] - }; - - SILOneTypeValuesLayout::emitRecord( - Out, ScratchRecord, SILAbbrCodes[SILOneTypeValuesLayout::Code], - (unsigned)SI.getKind(), - S.addTypeRef(CBI->getTargetLoweredType().getASTType()), - (unsigned)CBI->getTargetLoweredType().getCategory(), - 
llvm::makeArrayRef(listOfValues)); - break; - } case SILInstructionKind::CheckedCastAddrBranchInst: { auto CBI = cast(&SI); ValueID listOfValues[] = { diff --git a/test/SIL/Parser/opaque_values_parse.sil b/test/SIL/Parser/opaque_values_parse.sil index 2ca772047fdae..ff31edd08c819 100644 --- a/test/SIL/Parser/opaque_values_parse.sil +++ b/test/SIL/Parser/opaque_values_parse.sil @@ -14,36 +14,6 @@ struct S : Foo { init() } -// CHECK-LABEL: sil @castOpaque : $@convention(thin) (Int) -> () { -// CHECK: bb0([[ARG:%.*]] : $Int): -// CHECK: unconditional_checked_cast_value Int in [[ARG]] : $Int to Foo -// CHECK-LABEL: } // end sil function 'castOpaque' -sil @castOpaque : $@convention(thin) (Int) -> () { -bb0(%0 : $Int): - %c = unconditional_checked_cast_value Int in %0 : $Int to Foo - %t = tuple () - return %t : $() -} - -// CHECK-LABEL: sil @condCastOpaque : $@convention(thin) (Int) -> () { -// CHECK: bb0([[ARG:%.*]] : $Int): -// CHECK: checked_cast_value_br Int in [[ARG]] : $Int to Int -// CHECK-LABEL: } // end sil function 'condCastOpaque' -sil @condCastOpaque : $@convention(thin) (Int) -> () { -bb0(%0 : $Int): - checked_cast_value_br Int in %0 : $Int to Int, bb2, bb1 - -bb1: - br bb3 - -bb2(%i : $Int): - br bb3 - -bb3: - %t = tuple () - return %t : $() -} - // CHECK-LABEL: sil @initDeinitExistentialValue : $@convention(thin) (@in T) -> () { // CHECK: bb0([[ARG:%.*]] : $T): // CHECK: [[IE:%.*]] = init_existential_value [[ARG]] : $T, $T, $Any diff --git a/test/SIL/Serialization/opaque_values_serialize.sil b/test/SIL/Serialization/opaque_values_serialize.sil index 976dd7683b82a..73f62f5e60c57 100644 --- a/test/SIL/Serialization/opaque_values_serialize.sil +++ b/test/SIL/Serialization/opaque_values_serialize.sil @@ -19,36 +19,6 @@ struct S : Foo { init() } -// CHECK-LABEL: sil [serialized] @castOpaque : $@convention(thin) (Int) -> () { -// CHECK: bb0([[ARG:%.*]] : $Int): -// CHECK: unconditional_checked_cast_value Int in [[ARG]] : $Int to Foo -// CHECK-LABEL: } // end sil 
function 'castOpaque' -sil [serialized] @castOpaque : $@convention(thin) (Int) -> () { -bb0(%0 : $Int): - %c = unconditional_checked_cast_value Int in %0 : $Int to Foo - %t = tuple () - return %t : $() -} - -// CHECK-LABEL: sil [serialized] @condCastOpaque : $@convention(thin) (Int) -> () { -// CHECK: bb0([[ARG:%.*]] : $Int): -// CHECK: checked_cast_value_br Int in [[ARG]] : $Int to Int -// CHECK-LABEL: } // end sil function 'condCastOpaque' -sil [serialized] @condCastOpaque : $@convention(thin) (Int) -> () { -bb0(%0 : $Int): - checked_cast_value_br Int in %0 : $Int to Int, bb2, bb1 - -bb1: - br bb3 - -bb2(%i : $Int): - br bb3 - -bb3: - %t = tuple () - return %t : $() -} - // CHECK-LABEL: sil [serialized] @initDeinitExistentialValue : $@convention(thin) (@in T) -> () { // CHECK: bb0([[ARG:%.*]] : $T): // CHECK: [[IE:%.*]] = init_existential_value [[ARG]] : $T, $T, $Any diff --git a/test/SIL/ownership-verifier/opaque_use_verifier.sil b/test/SIL/ownership-verifier/opaque_use_verifier.sil index 90c4131a3bced..64f71496d0bdd 100644 --- a/test/SIL/ownership-verifier/opaque_use_verifier.sil +++ b/test/SIL/ownership-verifier/opaque_use_verifier.sil @@ -9,12 +9,6 @@ sil_stage raw import Builtin -sil [ossa] @unconditional_checked_cast_value_test : $@convention(thin) (Builtin.Int32) -> @out T { -bb0(%0 : $Builtin.Int32): - %1 = unconditional_checked_cast_value Builtin.Int32 in %0 : $Builtin.Int32 to T - return %1 : $T -} - sil [ossa] @opaque_identity : $@convention(thin) (@in T) -> @out T { bb0(%0 : @owned $T): return %0 : $T @@ -66,20 +60,6 @@ bb0(%0 : $@thick AnyObject.Type): return %18 : $() } -// Test an unconditional cast from an owned value to a trivial value. 
-sil [ossa] @castToTrivial : $@convention(thin) (@owned AnyObject) -> () { -bb0(%0 : @owned $AnyObject): - %6 = function_ref @takeType : $@convention(thin) (@thick AnyObject.Type) -> () - %8 = begin_borrow %0 : $AnyObject - %9 = copy_value %8 : $AnyObject - %10 = unconditional_checked_cast_value AnyObject in %9 : $AnyObject to @thick AnyObject.Type - %11 = apply %6(%10) : $@convention(thin) (@thick AnyObject.Type) -> () - end_borrow %8 : $AnyObject - destroy_value %0 : $AnyObject - %18 = tuple () - return %18 : $() -} - sil [ossa] @passTrivialAsOpaqueValue : $@convention(thin) (Builtin.Int64) -> () { bb0(%0 : $Builtin.Int64): %1 = function_ref @opaque_copy : $@convention(thin) (@in_guaranteed T) -> @out T diff --git a/test/SILOptimizer/latecodemotion.sil b/test/SILOptimizer/latecodemotion.sil index e364e93472961..d9e8843a4c623 100644 --- a/test/SILOptimizer/latecodemotion.sil +++ b/test/SILOptimizer/latecodemotion.sil @@ -1386,19 +1386,3 @@ bb0(%0 : $Builtin.NativeObject, %1: $Builtin.NativeObject): %5 = tuple() return %5 : $() } - -// CHECK: sil @dont_hoist_release_accross_cast_value -// CHECK: retain -// CHECK: apply -// CHECK: unconditional_checked_cast -// CHECK: release -sil @dont_hoist_release_accross_cast_value : $@convention(thin) (Builtin.NativeObject, Builtin.NativeObject) -> () { -bb0(%0 : $Builtin.NativeObject, %1: $Builtin.NativeObject): - strong_retain %0: $Builtin.NativeObject - %2 = function_ref @blocker : $@convention(thin) () -> () - apply %2() : $@convention(thin) () -> () - %c = unconditional_checked_cast_value Builtin.NativeObject in %0 : $Builtin.NativeObject to B - strong_release %0: $Builtin.NativeObject - %5 = tuple() - return %5 : $() -} diff --git a/test/SILOptimizer/side-effect.sil b/test/SILOptimizer/side-effect.sil index 91ef07e40f407..f1472a8d2bec2 100644 --- a/test/SILOptimizer/side-effect.sil +++ b/test/SILOptimizer/side-effect.sil @@ -219,15 +219,6 @@ bb0(%0 : $Builtin.NativeObject, %1 : $X): return %r : $() } -// CHECK-LABEL: sil 
@checkedcastvalue -// CHECK: -sil @checkedcastvalue : $@convention(thin) (Builtin.NativeObject) -> () { -bb0(%0 : $Builtin.NativeObject): - %1 = unconditional_checked_cast_value Builtin.NativeObject in %0 : $Builtin.NativeObject to X - %r = tuple () - return %r : $() -} - sil_global public @sil_global1 : $Int32 // Test the propagation of side-effects through the call graph. From caaad424d81b4883e6cf5abaeb75acf7743d339b Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Fri, 4 Mar 2022 23:08:51 -0800 Subject: [PATCH 70/88] [SIL-opaque] Various SILGen fixes --- lib/SILGen/SILGenApply.cpp | 2 +- lib/SILGen/SILGenDynamicCast.cpp | 4 ++-- lib/SILGen/SILGenExpr.cpp | 2 +- lib/SILGen/SILGenFunction.cpp | 23 ++++++++++++++--------- lib/SILGen/SILGenPoly.cpp | 9 ++++++++- lib/SILGen/SILGenProlog.cpp | 15 +++++++++++---- 6 files changed, 37 insertions(+), 18 deletions(-) diff --git a/lib/SILGen/SILGenApply.cpp b/lib/SILGen/SILGenApply.cpp index 88c602a75b754..9ab9cc59b366e 100644 --- a/lib/SILGen/SILGenApply.cpp +++ b/lib/SILGen/SILGenApply.cpp @@ -3964,7 +3964,7 @@ SILGenFunction::emitBeginApply(SILLocation loc, ManagedValue fn, yields.push_back(ManagedValue::forLValue(value)); } else if (info.isConsumed()) { yields.push_back(emitManagedRValueWithCleanup(value)); - } else if (info.isDirectGuaranteed()) { + } else if (info.isGuaranteed()) { yields.push_back(ManagedValue::forBorrowedRValue(value)); } else { yields.push_back(ManagedValue::forTrivialRValue(value)); diff --git a/lib/SILGen/SILGenDynamicCast.cpp b/lib/SILGen/SILGenDynamicCast.cpp index 13bf42c10ce9f..017ed7d724207 100644 --- a/lib/SILGen/SILGenDynamicCast.cpp +++ b/lib/SILGen/SILGenDynamicCast.cpp @@ -252,7 +252,7 @@ namespace { } ManagedValue result; - if (!origTargetTL.isAddressOnly()) { + if (!origTargetTL.isAddressOnly() || !SGF.useLoweredAddresses()) { result = SGF.emitLoad(Loc, buffer, origTargetTL, ctx, IsTake); } else { result = SGF.emitManagedBufferWithCleanup(buffer, origTargetTL); @@ -450,7 +450,7 @@ 
RValue Lowering::emitConditionalCheckedCast( SILValue resultObjectBuffer; Optional resultObjectTemp; SGFContext resultObjectCtx; - if ((resultTL.isAddressOnly()) + if ((resultTL.isAddressOnly() && SGF.useLoweredAddresses()) || (C.getEmitInto() && C.getEmitInto()->canPerformInPlaceInitialization())) { SILType resultTy = resultTL.getLoweredType(); diff --git a/lib/SILGen/SILGenExpr.cpp b/lib/SILGen/SILGenExpr.cpp index 13eda8f003612..b1e64d299681c 100644 --- a/lib/SILGen/SILGenExpr.cpp +++ b/lib/SILGen/SILGenExpr.cpp @@ -4726,7 +4726,7 @@ ManagedValue SILGenFunction::emitBindOptional(SILLocation loc, // If optValue was loadable, we emitted a switch_enum. In such a case, return // the argument from hasValueBB. - if (optValue.getType().isLoadable(F)) { + if (optValue.getType().isLoadable(F) || !silConv.useLoweredAddresses()) { return emitManagedRValueWithCleanup(hasValueBB->getArgument(0)); } diff --git a/lib/SILGen/SILGenFunction.cpp b/lib/SILGen/SILGenFunction.cpp index 27dc9410142a5..7b30081a9b4da 100644 --- a/lib/SILGen/SILGenFunction.cpp +++ b/lib/SILGen/SILGenFunction.cpp @@ -271,6 +271,7 @@ void SILGenFunction::emitCaptures(SILLocation loc, break; case CaptureKind::Immutable: case CaptureKind::StorageAddress: + // FIXME_addrlower: only call getAddressType for M.useLoweredAddresses() capturedArgs.push_back(emitUndef(getLoweredType(type).getAddressType())); break; case CaptureKind::Box: { @@ -290,11 +291,13 @@ void SILGenFunction::emitCaptures(SILLocation loc, // Get an address value for a SILValue if it is address only in an type // expansion context without opaque archetype substitution. 
auto getAddressValue = [&](SILValue entryValue) -> SILValue { - if (SGM.Types.getTypeLowering( - valueType, - TypeExpansionContext::noOpaqueTypeArchetypesSubstitution( - expansion.getResilienceExpansion())) - .isAddressOnly() + if (SGM.M.useLoweredAddresses() + && SGM.Types + .getTypeLowering( + valueType, + TypeExpansionContext::noOpaqueTypeArchetypesSubstitution( + expansion.getResilienceExpansion())) + .isAddressOnly() && !entryValue->getType().isAddress()) { auto addr = emitTemporaryAllocation(vd, entryValue->getType()); @@ -342,13 +345,15 @@ void SILGenFunction::emitCaptures(SILLocation loc, } case CaptureKind::Immutable: { if (canGuarantee) { - auto entryValue = getAddressValue(Entry.value); // No-escaping stored declarations are captured as the // address of the value. - assert(entryValue->getType().isAddress() && "no address for captured var!"); - capturedArgs.push_back(ManagedValue::forLValue(entryValue)); + auto entryValue = getAddressValue(Entry.value); + capturedArgs.push_back(ManagedValue::forBorrowedRValue(entryValue)); } - else { + else if (!silConv.useLoweredAddresses()) { + capturedArgs.push_back( + B.createCopyValue(loc, ManagedValue::forUnmanaged(Entry.value))); + } else { auto entryValue = getAddressValue(Entry.value); // We cannot pass a valid SILDebugVariable while creating the temp here // See rdar://60425582 diff --git a/lib/SILGen/SILGenPoly.cpp b/lib/SILGen/SILGenPoly.cpp index 220d6755dd9d1..167156fa081f6 100644 --- a/lib/SILGen/SILGenPoly.cpp +++ b/lib/SILGen/SILGenPoly.cpp @@ -1167,6 +1167,10 @@ namespace { outputOrigEltType, outputEltType, elt, loweredOutputEltTy); + // Aggregation of address-only values requires ownership. 
+ if (loweredOutputTy.isAddressOnly(SGF.F)) { + elt = elt.ensurePlusOne(SGF, Loc); + } elements.push_back(elt); } @@ -1175,7 +1179,10 @@ namespace { forwarded.push_back(elt.forward(SGF)); auto tuple = SGF.B.createTuple(Loc, loweredOutputTy, forwarded); - return SGF.emitManagedRValueWithCleanup(tuple); + if (tuple->getOwnershipKind() == OwnershipKind::Owned) + return SGF.emitManagedRValueWithCleanup(tuple); + + return ManagedValue::forUnmanaged(tuple); } /// Handle a tuple that has been exploded in the input but wrapped in diff --git a/lib/SILGen/SILGenProlog.cpp b/lib/SILGen/SILGenProlog.cpp index 4168d7599fc3a..6c2de16c6ad77 100644 --- a/lib/SILGen/SILGenProlog.cpp +++ b/lib/SILGen/SILGenProlog.cpp @@ -456,11 +456,18 @@ static void emitCaptureArguments(SILGenFunction &SGF, case CaptureKind::StorageAddress: { // Non-escaping stored decls are captured as the address of the value. auto type = getVarTypeInCaptureContext(); - SILType ty = SGF.getLoweredType(type).getAddressType(); - SILValue addr = SGF.F.begin()->createFunctionArgument(ty, VD); - SGF.VarLocs[VD] = SILGenFunction::VarLoc::get(addr); + SILType ty = SGF.getLoweredType(type); + if (SGF.SGM.M.useLoweredAddresses()) { + ty = ty.getAddressType(); + } + SILValue arg = SGF.F.begin()->createFunctionArgument(ty, VD); + SGF.VarLocs[VD] = SILGenFunction::VarLoc::get(arg); SILDebugVariable DbgVar(VD->isLet(), ArgNo); - SGF.B.createDebugValueAddr(Loc, addr, DbgVar); + if (ty.isAddress()) { + SGF.B.createDebugValueAddr(Loc, arg, DbgVar); + } else { + SGF.B.createDebugValue(Loc, arg, DbgVar); + } break; } } From 4e3f0dfecc4b12b8ad163c378a6df1f4ede8509e Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 15 Mar 2022 23:45:05 -0700 Subject: [PATCH 71/88] [SIL-opaque] use generated SILLocations Anywhere that code is not obviously inserted immediately adjacent to the origin instruction. 
--- .../Mandatory/AddressLowering.cpp | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 1c72876d762d9..3421ceac87c3f 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1269,6 +1269,9 @@ class AddressMaterialization { /// /// If the operand projects into its use, then the memory was already /// initialized when visiting the use. +/// +/// It's ok for the builder to reuse the user's SILLocation because +/// initializeComposingUse always inserts code immediately before the user. void AddressMaterialization::initializeComposingUse(Operand *operand) { SILValue def = operand->get(); if (def->getType().isAddressOnly(*pass.function)) { @@ -1405,7 +1408,7 @@ SILValue AddressMaterialization::materializeStructExtract( SILValue srcAddr = pass.getMaterializedAddress(structVal); auto *structType = structVal->getType().getStructOrBoundGenericStruct(); auto *varDecl = structType->getStoredProperties()[fieldIdx]; - return B.createStructElementAddr(extractInst->getLoc(), srcAddr, varDecl, + return B.createStructElementAddr(pass.genLoc(), srcAddr, varDecl, elementValue->getType().getAddressType()); } @@ -1413,7 +1416,7 @@ SILValue AddressMaterialization::materializeStructExtract( SILValue AddressMaterialization::materializeTupleExtract( SILInstruction *extractInst, SILValue elementValue, unsigned fieldIdx) { SILValue srcAddr = pass.getMaterializedAddress(extractInst->getOperand(0)); - return B.createTupleElementAddr(extractInst->getLoc(), srcAddr, fieldIdx, + return B.createTupleElementAddr(pass.genLoc(), srcAddr, fieldIdx, elementValue->getType().getAddressType()); } @@ -1431,7 +1434,7 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, case SILInstructionKind::EnumInst: { auto *enumInst = cast(user); SILValue enumAddr = materializeComposingUser(enumInst, 
intoPhiOperand); - return B.createInitEnumDataAddr(enumInst->getLoc(), enumAddr, + return B.createInitEnumDataAddr(pass.genLoc(), enumAddr, enumInst->getElement(), operand->get()->getType().getAddressType()); } @@ -1443,8 +1446,8 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, auto opaque = Lowering::AbstractionPattern::getOpaque(); auto &concreteTL = pass.function->getTypeLowering(opaque, canTy); return B.createInitExistentialAddr( - initExistentialValue->getLoc(), containerAddr, canTy, - concreteTL.getLoweredType(), initExistentialValue->getConformances()); + pass.genLoc(), containerAddr, canTy, + concreteTL.getLoweredType(), initExistentialValue->getConformances()); } case SILInstructionKind::StructInst: { auto *structInst = cast(user); @@ -1454,8 +1457,8 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, SILValue structAddr = materializeComposingUser(structInst, intoPhiOperand); return B.createStructElementAddr( - structInst->getLoc(), structAddr, *fieldIter, - operand->get()->getType().getAddressType()); + pass.genLoc(), structAddr, *fieldIter, + operand->get()->getType().getAddressType()); } case SILInstructionKind::TupleInst: { auto *tupleInst = cast(user); @@ -1467,7 +1470,7 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, return pass.function->getArguments()[resultIdx]; } SILValue tupleAddr = materializeComposingUser(tupleInst, intoPhiOperand); - return B.createTupleElementAddr(tupleInst->getLoc(), tupleAddr, + return B.createTupleElementAddr(pass.genLoc(), tupleAddr, operand->getOperandNumber(), operand->get()->getType().getAddressType()); } @@ -2739,7 +2742,7 @@ void UseRewriter::visitSwitchEnumInst(SwitchEnumInst * switchEnum) { auto *caseAddr = caseBuilder.createUncheckedTakeEnumDataAddr(loc, enumAddr, caseDecl); auto *caseLoad = caseBuilder.createTrivialLoadOr( - switchEnum->getLoc(), caseAddr, LoadOwnershipQualifier::Take); + loc, caseAddr, LoadOwnershipQualifier::Take); 
caseArg->replaceAllUsesWith(caseLoad); if (caseArg->getType().isAddressOnly(*pass.function)) { // Remap caseArg to the new dummy load which will be deleted during From a133549e00c84acf71eb4c44c3ea0c815bf71268 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sun, 20 Mar 2022 00:04:28 -0700 Subject: [PATCH 72/88] [SIL-opaque] Add @in_guaranteed function argument support. Temporarily map storage to a fake load_borrow. --- .../Mandatory/AddressLowering.cpp | 39 ++++++++++++------- test/SILOptimizer/address_lowering.sil | 20 ++++++++-- 2 files changed, 43 insertions(+), 16 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index 3421ceac87c3f..e01303b8c3b38 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -425,7 +425,12 @@ struct AddressLoweringState { AddressLoweringState(SILFunction *function, DominanceInfo *domInfo) : function(function), loweredFnConv(getLoweredFnConv(function)), - domInfo(domInfo) {} + domInfo(domInfo) { + for (auto &block : *function) { + if (block.getTerminator()->isFunctionExiting()) + exitingInsts.push_back(block.getTerminator()); + } + } SILModule *getModule() const { return &function->getModule(); } @@ -489,28 +494,39 @@ static void convertDirectToIndirectFunctionArgs(AddressLoweringState &pass) { if (param.isFormalIndirect() && !fnConv.isSILIndirect(param)) { SILArgument *arg = pass.function->getArgument(argIdx); SILType addrType = arg->getType().getAddressType(); - LoadInst *loadArg = argBuilder.createTrivialLoadOr( - SILValue(arg).getLoc(), SILUndef::get(addrType, *pass.function), - LoadOwnershipQualifier::Take); - - arg->replaceAllUsesWith(loadArg); + auto loc = SILValue(arg).getLoc(); + SILValue undefAddress = SILUndef::get(addrType, *pass.function); + SingleValueInstruction *load; + if (param.isConsumed()) { + load = argBuilder.createTrivialLoadOr(loc, undefAddress, + LoadOwnershipQualifier::Take); + 
} else { + load = cast( + argBuilder.emitLoadBorrowOperation(loc, undefAddress)); + for (SILInstruction *termInst : pass.exitingInsts) { + pass.getBuilder(termInst->getIterator()) + .createEndBorrow(pass.genLoc(), load); + } + } + arg->replaceAllUsesWith(load); assert(!pass.valueStorageMap.contains(arg)); arg = arg->getParent()->replaceFunctionArgument( arg->getIndex(), addrType, OwnershipKind::None, arg->getDecl()); - loadArg->setOperand(arg); + assert(isa(load) || isa(load)); + load->setOperand(0, arg); // Indirect calling convention may be used for loadable types. In that // case, generating the argument loads is sufficient. if (addrType.isAddressOnly(*pass.function)) { - pass.valueStorageMap.insertValue(loadArg, arg); + pass.valueStorageMap.insertValue(load, arg); } } ++argIdx; } - assert(argIdx - == fnConv.getSILArgIndexOfFirstParam() + fnConv.getNumSILArguments()); + assert(argIdx == + fnConv.getSILArgIndexOfFirstParam() + fnConv.getNumSILArguments()); } /// Before populating the ValueStorageMap, insert function arguments for any @@ -575,9 +591,6 @@ class OpaqueValueVisitor { /// to valueStorageMap in RPO. void OpaqueValueVisitor::mapValueStorage() { for (auto *block : postorderInfo.getReversePostOrder()) { - if (block->getTerminator()->isFunctionExiting()) - pass.exitingInsts.push_back(block->getTerminator()); - // Opaque function arguments have already been replaced. 
if (block != pass.function->getEntryBlock()) { for (auto *arg : block->getArguments()) { diff --git a/test/SILOptimizer/address_lowering.sil b/test/SILOptimizer/address_lowering.sil index 5ee49ed89b145..12d17e6ec4938 100644 --- a/test/SILOptimizer/address_lowering.sil +++ b/test/SILOptimizer/address_lowering.sil @@ -272,12 +272,12 @@ bb0(%0 : @owned $C): sil [ossa] @f044_indirectGuaranteed : $@convention(thin) (@in_guaranteed T) -> () -// CHECK-LABEL: sil [ossa] @f045_indirectGuaranteedArg : $@convention(thin) (@in T) -> () { +// CHECK-LABEL: sil [ossa] @f045_indirectGuaranteedCallArg : $@convention(thin) (@in T) -> () { // CHECK: bb0(%0 : $*T): // CHECK: apply %{{.*}}(%0) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> () // CHECK: destroy_addr %0 : $*T -// CHECK-LABEL: } // end sil function 'f045_indirectGuaranteedArg' -sil [ossa] @f045_indirectGuaranteedArg : $@convention(thin) (@in T) -> () { +// CHECK-LABEL: } // end sil function 'f045_indirectGuaranteedCallArg' +sil [ossa] @f045_indirectGuaranteedCallArg : $@convention(thin) (@in T) -> () { bb0(%0 : @owned $T): %1 = function_ref @f044_indirectGuaranteed : $@convention(thin) <τ_0_0>(@in_guaranteed τ_0_0) -> () %2 = apply %1(%0) : $@convention(thin) <τ_0_0>(@in_guaranteed τ_0_0) -> () @@ -286,6 +286,20 @@ bb0(%0 : @owned $T): return %6 : $() } +// CHECK-LABEL: sil [ossa] @f046_indirectGuaranteedFunctionArg : $@convention(thin) (@in_guaranteed T) -> () { +// CHECK: bb0(%0 : $*T): +// CHECK-NOT: load +// CHECK: apply %{{.*}}(%0) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> () +// CHECK-NOT: end_borrow +// CHECK-LABEL: } // end sil function 'f046_indirectGuaranteedFunctionArg' +sil [ossa] @f046_indirectGuaranteedFunctionArg : $@convention(thin) (@in_guaranteed T) -> () { +bb0(%0 : @guaranteed $T): + %1 = function_ref @f044_indirectGuaranteed : $@convention(thin) <τ_0_0>(@in_guaranteed τ_0_0) -> () + %2 = apply %1(%0) : $@convention(thin) <τ_0_0>(@in_guaranteed τ_0_0) -> () + %6 = tuple () + return 
%6 : $() +} + // CHECK-LABEL: sil [ossa] @f050_storeinout : $@convention(thin) (@inout T, @inout T, @in T) -> () { // CHECK: bb0(%0 : $*T, %1 : $*T, %2 : $*T): // CHECK: %[[PREV1:.*]] = alloc_stack $T From 3ec96fa6f73dbde51f2c436a2a1d0889647425bb Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sun, 20 Mar 2022 00:05:49 -0700 Subject: [PATCH 73/88] [SIL-opaque] Add an assert for open_existential_value. Add comments. Add a basic dominance sanity check. --- .../Mandatory/AddressLowering.cpp | 24 +++++++++++++------ test/SILOptimizer/address_lowering.sil | 2 +- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index e01303b8c3b38..f1055e00d3765 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -792,9 +792,16 @@ static Operand *getProjectedDefOperand(SILValue value) { /// If \p value is a an existential or enum, then return the existential or enum /// operand. These operations are always rewritten by the UseRewriter and always -/// destructively reuse the same storage as their operand. Note that if the -/// operation's result is address-only, then the operand must be address-only -/// and therefore must mapped to ValueStorage. +/// reuse the same storage as their operand. Note that if the operation's result +/// is address-only, then the operand must be address-only and therefore must +/// mapped to ValueStorage. +/// +/// open_existential_value must reuse storage because the boxed value is shared +/// with other instances of the existential. An explicit copy is needed to +/// obtain an owned value. +/// +/// unchecked_enum_data and switch_enum must reuse storage because extracting +/// the payload destroys the enum value. 
static Operand *getReusedStorageOperand(SILValue value) { switch (value->getKind()) { default: @@ -1180,15 +1187,18 @@ createStackAllocation(SILValue value) { auto *openingInst = openingVal->getDefiningInstruction(); assert(openingVal && "all opened archetypes should be resolved"); - if (latestOpeningInst - && pass.domInfo->dominates(openingInst, latestOpeningInst)) { - return; + if (latestOpeningInst) { + if (pass.domInfo->dominates(openingInst, latestOpeningInst)) + return; + + assert(pass.domInfo->dominates(latestOpeningInst, openingInst) && + "opened archetypes must dominate their uses"); } latestOpeningInst = openingInst; } }); auto allocPt = latestOpeningInst ? std::next(latestOpeningInst->getIterator()) - : pass.function->begin()->begin(); + : pass.function->begin()->begin(); auto allocBuilder = pass.getBuilder(allocPt); AllocStackInst *alloc = allocBuilder.createAllocStack(pass.genLoc(), allocTy); diff --git a/test/SILOptimizer/address_lowering.sil b/test/SILOptimizer/address_lowering.sil index 12d17e6ec4938..a12914463e6ab 100644 --- a/test/SILOptimizer/address_lowering.sil +++ b/test/SILOptimizer/address_lowering.sil @@ -933,7 +933,7 @@ bb0(%0 : @owned $P): %9 = witness_method $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P, #P.foo, %8 : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () %cpy = copy_value %8 : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P end_borrow %b : $P - // This optional is an aggregate that contains an opened exsitential. May sure it's allocated after open_existential_addr. + // This optional is an aggregate that contains an opened existential. Make sure it is allocated after open_existential_addr. 
%opt = enum $Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>, #Optional.some!enumelt, %cpy : $@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P %some = unchecked_enum_data %opt : $Optional<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>, #Optional.some!enumelt %11 = apply %9<@opened("EF755EF2-B636-11E7-B7B4-A45E60ECC541") P>(%some) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> () From 1625a44ded5d021d2cc25bc7aa502f6c025cc3e7 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sun, 20 Mar 2022 00:07:39 -0700 Subject: [PATCH 74/88] [SIL-opaque] [NFC] clang-format AddressLowering.cpp --- .../Mandatory/AddressLowering.cpp | 102 +++++++++--------- 1 file changed, 49 insertions(+), 53 deletions(-) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index f1055e00d3765..b922d5f99fc3a 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -1089,9 +1089,8 @@ bool OpaqueStorageAllocation::findProjectionIntoUseImpl( return false; } -bool OpaqueStorageAllocation:: -checkStorageDominates(AllocStackInst *allocInst, - ArrayRef incomingValues) { +bool OpaqueStorageAllocation::checkStorageDominates( + AllocStackInst *allocInst, ArrayRef incomingValues) { for (SILValue incomingValue : incomingValues) { if (auto *defInst = incomingValue->getDefiningInstruction()) { @@ -1102,8 +1101,8 @@ checkStorageDominates(AllocStackInst *allocInst, // Handle both phis and terminator results. auto *bbArg = cast(incomingValue); // The storage block must strictly dominate the phi. 
- if (!pass.domInfo->properlyDominates( - allocInst->getParent(), bbArg->getParent())) { + if (!pass.domInfo->properlyDominates(allocInst->getParent(), + bbArg->getParent())) { return false; } } @@ -1157,9 +1156,8 @@ void OpaqueStorageAllocation::removeAllocation(SILValue value) { // Any value that may be used by a return instruction must be deallocated // immediately before the return. This allows the return to be rewritten by // loading from storage. -AllocStackInst *OpaqueStorageAllocation:: -createStackAllocation(SILValue value) { - assert(value.getOwnershipKind() != OwnershipKind::Guaranteed && +AllocStackInst *OpaqueStorageAllocation::createStackAllocation(SILValue value) { + assert(value.getOwnershipKind() != OwnershipKind::Guaranteed && "creating storage for a guaranteed value implies a copy"); // Instructions that produce an opened type never reach here because they @@ -1354,7 +1352,7 @@ SILValue AddressMaterialization::recursivelyMaterializeStorage( SILValue useVal = useStorage.value; if (auto *defInst = useVal->getDefiningInstruction()) { Operand *useOper = - &defInst->getAllOperands()[storage.projectedOperandNum]; + &defInst->getAllOperands()[storage.projectedOperandNum]; return recordAddress( materializeProjectionIntoUse(useOper, intoPhiOperand)); } @@ -1368,8 +1366,8 @@ SILValue AddressMaterialization::recursivelyMaterializeStorage( pass.valueStorageMap.getProjectedStorage(storage).storage, /*intoPhiOperand*/ true)); } - assert(!storage.isProjection() - && "a composing user may not also be a def projection"); + assert(!storage.isProjection() && + "a composing user may not also be a def projection"); return storage.storageAddress; } @@ -1468,9 +1466,9 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, auto canTy = initExistentialValue->getFormalConcreteType(); auto opaque = Lowering::AbstractionPattern::getOpaque(); auto &concreteTL = pass.function->getTypeLowering(opaque, canTy); - return B.createInitExistentialAddr( - 
pass.genLoc(), containerAddr, canTy, - concreteTL.getLoweredType(), initExistentialValue->getConformances()); + return B.createInitExistentialAddr(pass.genLoc(), containerAddr, canTy, + concreteTL.getLoweredType(), + initExistentialValue->getConformances()); } case SILInstructionKind::StructInst: { auto *structInst = cast(user); @@ -1480,8 +1478,8 @@ AddressMaterialization::materializeProjectionIntoUse(Operand *operand, SILValue structAddr = materializeComposingUser(structInst, intoPhiOperand); return B.createStructElementAddr( - pass.genLoc(), structAddr, *fieldIter, - operand->get()->getType().getAddressType()); + pass.genLoc(), structAddr, *fieldIter, + operand->get()->getType().getAddressType()); } case SILInstructionKind::TupleInst: { auto *tupleInst = cast(user); @@ -1584,8 +1582,8 @@ void PhiRewriter::materializeOperand(PhiOperand phiOper) { auto &operStorage = pass.valueStorageMap.getStorage(phiOper.getOperand()->get()); if (operStorage.isPhiProjection()) { - if (operStorage.projectedStorageID - == pass.valueStorageMap.getOrdinal(phiOper.getValue())) { + if (operStorage.projectedStorageID == + pass.valueStorageMap.getOrdinal(phiOper.getValue())) { // This operand was coalesced with this particular phi. No move needed. return; } @@ -1646,8 +1644,8 @@ PhiRewriter::MovePosition PhiRewriter::findPhiMovePosition(PhiOperand phiOper) { if (!phiMove || !phiMoves.contains(phiMove)) break; - if (!foundEarliestInsertPoint - && getAccessBase(phiMove->getSrc()) == phiBaseAddress) { + if (!foundEarliestInsertPoint && + getAccessBase(phiMove->getSrc()) == phiBaseAddress) { // Anti-dependence from the phi move to the phi value. Do not move into // the phi storage before this point. 
foundEarliestInsertPoint = true; @@ -1698,8 +1696,8 @@ bool CallArgRewriter::rewriteArguments() { bool changed = false; auto origConv = apply.getSubstCalleeConv(); - assert(apply.getNumArguments() == origConv.getNumParameters() - && "results should not yet be rewritten"); + assert(apply.getNumArguments() == origConv.getNumParameters() && + "results should not yet be rewritten"); for (unsigned argIdx = apply.getCalleeArgIndexOfFirstAppliedArg(), endArgIdx = argIdx + apply.getNumArguments(); @@ -1968,8 +1966,8 @@ void ApplyRewriter::makeIndirectArgs(MutableArrayRef newCallArgs) { loweredCalleeConv.getSILArgIndexOfFirstIndirectResult(); auto visitCallResult = [&](SILValue result, SILResultInfo resultInfo) { - assert(!opaqueCalleeConv.isSILIndirect(resultInfo) - && "canonical call results are always direct"); + assert(!opaqueCalleeConv.isSILIndirect(resultInfo) && + "canonical call results are always direct"); if (loweredCalleeConv.isSILIndirect(resultInfo)) { SILValue indirectResultAddr = materializeIndirectResultAddress( @@ -2048,8 +2046,8 @@ void ApplyRewriter::rewriteApply(ArrayRef newCallArgs) { auto *oldCall = cast(apply.getInstruction()); auto *newCall = argBuilder.createApply( - callLoc, apply.getCallee(), apply.getSubstitutionMap(), newCallArgs, - oldCall->getApplyOptions(), oldCall->getSpecializationInfo()); + callLoc, apply.getCallee(), apply.getSubstitutionMap(), newCallArgs, + oldCall->getApplyOptions(), oldCall->getSpecializationInfo()); this->apply = FullApplySite(newCall); @@ -2134,9 +2132,8 @@ void ApplyRewriter::rewriteTryApply(ArrayRef newCallArgs) { auto replaceTermResult = [&](SILValue newResultVal) { SILType resultTy = loweredCalleeConv.getSILResultType(typeCtx); - auto ownership = resultTy.isTrivial(*pass.function) - ? OwnershipKind::None - : OwnershipKind::Owned; + auto ownership = resultTy.isTrivial(*pass.function) ? 
OwnershipKind::None + : OwnershipKind::Owned; resultArg->replaceAllUsesWith(newResultVal); assert(resultArg->getIndex() == 0); @@ -2209,8 +2206,8 @@ void ApplyRewriter::replaceDirectResults(DestructureTupleInst *oldDestructure) { unsigned newDirectResultIdx = 0; auto visitOldCallResult = [&](SILValue result, SILResultInfo resultInfo) { - assert(!opaqueCalleeConv.isSILIndirect(resultInfo) - && "canonical call results are always direct"); + assert(!opaqueCalleeConv.isSILIndirect(resultInfo) && + "canonical call results are always direct"); if (loweredCalleeConv.isSILIndirect(resultInfo)) { if (result->getType().isAddressOnly(*pass.function)) { @@ -2283,8 +2280,8 @@ void ReturnRewriter::rewriteReturn(ReturnInst *returnInst) { // Find the point before allocated storage has been deallocated. auto insertPt = SILBasicBlock::iterator(returnInst); - for (auto bbStart = returnInst->getParent()->begin(); - insertPt != bbStart; --insertPt) { + for (auto bbStart = returnInst->getParent()->begin(); insertPt != bbStart; + --insertPt) { if (!isa(*std::prev(insertPt))) break; } @@ -2308,23 +2305,22 @@ void ReturnRewriter::rewriteReturn(ReturnInst *returnInst) { pass.loweredFnConv.getSILArgIndexOfFirstIndirectResult(); // Initialize the indirect result arguments and populate newDirectResults. - for_each( - pass.function->getLoweredFunctionType()->getResults(), oldResults, - [&](SILResultInfo resultInfo, SILValue oldResult) { - // Assume that all original results are direct in SIL. 
- assert(!opaqueFnConv.isSILIndirect(resultInfo)); - if (!pass.loweredFnConv.isSILIndirect(resultInfo)) { - newDirectResults.push_back(oldResult); - return; - } - SILArgument *newResultArg = - pass.function->getArgument(newResultArgIdx); - rewriteElement(oldResult, newResultArg, returnBuilder); - ++newResultArgIdx; - }); - - assert(newDirectResults.size() - == pass.loweredFnConv.getNumDirectSILResults()); + for_each(pass.function->getLoweredFunctionType()->getResults(), oldResults, + [&](SILResultInfo resultInfo, SILValue oldResult) { + // Assume that all original results are direct in SIL. + assert(!opaqueFnConv.isSILIndirect(resultInfo)); + if (!pass.loweredFnConv.isSILIndirect(resultInfo)) { + newDirectResults.push_back(oldResult); + return; + } + SILArgument *newResultArg = + pass.function->getArgument(newResultArgIdx); + rewriteElement(oldResult, newResultArg, returnBuilder); + ++newResultArgIdx; + }); + + assert(newDirectResults.size() == + pass.loweredFnConv.getNumDirectSILResults()); assert(newResultArgIdx == pass.loweredFnConv.getSILArgIndexOfFirstParam()); // Generate a new return_inst for the new direct results. @@ -2335,9 +2331,9 @@ void ReturnRewriter::rewriteReturn(ReturnInst *returnInst) { } else if (newDirectResults.size() == 1) { newReturnVal = newDirectResults[0]; } else { - newReturnVal = returnBuilder.createTuple(pass.genLoc(), - pass.loweredFnConv.getSILResultType(typeCtx), - newDirectResults); + newReturnVal = returnBuilder.createTuple( + pass.genLoc(), pass.loweredFnConv.getSILResultType(typeCtx), + newDirectResults); } // Rewrite the returned value. 
SILValue origFullResult = returnInst->getOperand(); From 439b36277c45e2f2355bc18422ff4ea00891138a Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 1 Mar 2022 23:02:57 -0800 Subject: [PATCH 75/88] [SIL-opaque] Cleanup and reenable SILGen unit tests CHECK lines still need to be updated for OSSA --- test/SILGen/opaque_ownership.swift | 277 ---- test/SILGen/opaque_values_silgen.swift | 1399 +++-------------- test/SILGen/opaque_values_silgen_lib.swift | 1094 ++++++++++++- test/SILGen/opaque_values_silgen_todo.swift | 2 - test/SILGen/opaque_values_silgen_vtable.swift | 53 + 5 files changed, 1362 insertions(+), 1463 deletions(-) delete mode 100644 test/SILGen/opaque_ownership.swift delete mode 100644 test/SILGen/opaque_values_silgen_todo.swift create mode 100644 test/SILGen/opaque_values_silgen_vtable.swift diff --git a/test/SILGen/opaque_ownership.swift b/test/SILGen/opaque_ownership.swift deleted file mode 100644 index 7ee241a5572f2..0000000000000 --- a/test/SILGen/opaque_ownership.swift +++ /dev/null @@ -1,277 +0,0 @@ - -// RUN: %target-swift-emit-silgen -enable-sil-opaque-values -emit-sorted-sil -Xllvm -sil-full-demangle -parse-stdlib -parse-as-library -module-name Swift %s | %FileCheck %s -// RUN: %target-swift-emit-silgen -target x86_64-apple-macosx10.9 -enable-sil-opaque-values -emit-sorted-sil -Xllvm -sil-full-demangle -parse-stdlib -parse-as-library -module-name Swift %s | %FileCheck --check-prefix=CHECK-OSX %s - -public typealias AnyObject = Builtin.AnyObject - -precedencegroup AssignmentPrecedence {} -precedencegroup CastingPrecedence {} -precedencegroup ComparisonPrecedence {} - -public protocol _ObjectiveCBridgeable {} - -public protocol UnkeyedDecodingContainer { - var isAtEnd: Builtin.Int1 { get } -} - -public protocol Decoder { - func unkeyedContainer() throws -> UnkeyedDecodingContainer -} - -// Test open_existential_value ownership -// --- -// CHECK-LABEL: sil [ossa] @$ss11takeDecoder4fromBi1_s0B0_p_tKF : $@convention(thin) (@in_guaranteed Decoder) 
-> (Builtin.Int1, @error Error) { -// CHECK: bb0(%0 : @guaranteed $Decoder): -// CHECK: [[OPENED:%.*]] = open_existential_value %0 : $Decoder to $@opened("{{.*}}") Decoder -// CHECK: [[WT:%.*]] = witness_method $@opened("{{.*}}") Decoder, #Decoder.unkeyedContainer : (Self) -> () throws -> UnkeyedDecodingContainer, %3 : $@opened("{{.*}}") Decoder : $@convention(witness_method: Decoder) <τ_0_0 where τ_0_0 : Decoder> (@in_guaranteed τ_0_0) -> (@out UnkeyedDecodingContainer, @error Error) -// CHECK: try_apply [[WT]]<@opened("{{.*}}") Decoder>([[OPENED]]) : $@convention(witness_method: Decoder) <τ_0_0 where τ_0_0 : Decoder> (@in_guaranteed τ_0_0) -> (@out UnkeyedDecodingContainer, @error Error), normal bb2, error bb1 -// -// CHECK:bb{{.*}}([[RET1:%.*]] : @owned $UnkeyedDecodingContainer): -// CHECK: [[BORROW2:%.*]] = begin_borrow [lexical] [[RET1]] : $UnkeyedDecodingContainer -// CHECK: [[OPENED2:%.*]] = open_existential_value [[BORROW2]] : $UnkeyedDecodingContainer to $@opened("{{.*}}") UnkeyedDecodingContainer -// CHECK: [[WT2:%.*]] = witness_method $@opened("{{.*}}") UnkeyedDecodingContainer, #UnkeyedDecodingContainer.isAtEnd!getter : (Self) -> () -> Builtin.Int1, [[OPENED2]] : $@opened("{{.*}}") UnkeyedDecodingContainer : $@convention(witness_method: UnkeyedDecodingContainer) <τ_0_0 where τ_0_0 : UnkeyedDecodingContainer> (@in_guaranteed τ_0_0) -> Builtin.Int1 -// CHECK: [[RET2:%.*]] = apply [[WT2]]<@opened("{{.*}}") UnkeyedDecodingContainer>([[OPENED2]]) : $@convention(witness_method: UnkeyedDecodingContainer) <τ_0_0 where τ_0_0 : UnkeyedDecodingContainer> (@in_guaranteed τ_0_0) -> Builtin.Int1 -// CHECK: end_borrow [[BORROW2]] : $UnkeyedDecodingContainer -// CHECK: destroy_value [[RET1]] : $UnkeyedDecodingContainer -// CHECK-NOT: destroy_value %0 : $Decoder -// CHECK: return [[RET2]] : $Builtin.Int1 -// CHECK-LABEL: } // end sil function '$ss11takeDecoder4fromBi1_s0B0_p_tKF' -public func takeDecoder(from decoder: Decoder) throws -> Builtin.Int1 { - let container = 
try decoder.unkeyedContainer() - return container.isAtEnd -} - -// Test unsafe_bitwise_cast nontrivial ownership. -// --- -// CHECK-LABEL: sil [ossa] @$ss13unsafeBitCast_2toq_x_q_mtr0_lF : $@convention(thin) (@in_guaranteed T, @thick U.Type) -> @out U { -// CHECK: bb0([[ARG0:%.*]] : @guaranteed $T, [[ARG1:%.*]] : $@thick U.Type): -// CHECK: [[ARG_COPY:%.*]] = copy_value [[ARG0]] : $T -// CHECK: [[RESULT:%.*]] = unchecked_bitwise_cast [[ARG_COPY]] : $T to $U -// CHECK: [[RESULT_COPY:%.*]] = copy_value [[RESULT]] : $U -// CHECK: destroy_value [[ARG_COPY]] : $T -// CHECK: return [[RESULT_COPY]] : $U -// CHECK-LABEL: } // end sil function '$ss13unsafeBitCast_2toq_x_q_mtr0_lF' -public func unsafeBitCast(_ x: T, to type: U.Type) -> U { - return Builtin.reinterpretCast(x) -} - -// A lot of standard library support is necessary to support raw enums. -// -------------------------------------------------------------------- - -infix operator == : ComparisonPrecedence -infix operator ~= : ComparisonPrecedence - -public struct Bool { - var _value: Builtin.Int1 - - public init() { - let zero: Int64 = 0 - self._value = Builtin.trunc_Int64_Int1(zero._value) - } - - internal init(_ v: Builtin.Int1) { self._value = v } - - public init(_ value: Bool) { - self = value - } -} - -extension Bool { - public func _getBuiltinLogicValue() -> Builtin.Int1 { - return _value - } -} - -public protocol Equatable { - /// Returns a Boolean value indicating whether two values are equal. - /// - /// Equality is the inverse of inequality. For any values `a` and `b`, - /// `a == b` implies that `a != b` is `false`. - /// - /// - Parameters: - /// - lhs: A value to compare. - /// - rhs: Another value to compare. 
- static func == (lhs: Self, rhs: Self) -> Bool -} - -public func ~= (a: T, b: T) -> Bool { - return a == b -} - -public protocol RawRepresentable { - associatedtype RawValue - - init?(rawValue: RawValue) - - var rawValue: RawValue { get } -} - -public func == (lhs: T, rhs: T) -> Bool - where T.RawValue : Equatable { - return lhs.rawValue == rhs.rawValue -} - -public typealias _MaxBuiltinIntegerType = Builtin.IntLiteral - -public protocol _ExpressibleByBuiltinIntegerLiteral { - init(_builtinIntegerLiteral value: _MaxBuiltinIntegerType) -} - -public protocol ExpressibleByIntegerLiteral { - associatedtype IntegerLiteralType : _ExpressibleByBuiltinIntegerLiteral - - init(integerLiteral value: IntegerLiteralType) -} - -extension ExpressibleByIntegerLiteral - where Self : _ExpressibleByBuiltinIntegerLiteral { - @_transparent - public init(integerLiteral value: Self) { - self = value - } -} - -public protocol ExpressibleByStringLiteral {} -public protocol ExpressibleByFloatLiteral {} -public protocol ExpressibleByUnicodeScalarLiteral {} -public protocol ExpressibleByExtendedGraphemeClusterLiteral {} - -public struct Int64 : ExpressibleByIntegerLiteral, _ExpressibleByBuiltinIntegerLiteral, Equatable { - public var _value: Builtin.Int64 - public init(_builtinIntegerLiteral x: _MaxBuiltinIntegerType) { - _value = Builtin.s_to_s_checked_trunc_IntLiteral_Int64(x).0 - } - public typealias IntegerLiteralType = Int64 - public init(integerLiteral value: Int64) { - self = value - } - public static func ==(_ lhs: Int64, rhs: Int64) -> Bool { - return Bool(Builtin.cmp_eq_Int64(lhs._value, rhs._value)) - } -} - -public struct Int : _ExpressibleByBuiltinIntegerLiteral, ExpressibleByIntegerLiteral, Equatable { - var _value: Builtin.Int64 - public init() { - self = 0 - } - public typealias IntegerLiteralType = Int - public init(_builtinIntegerLiteral x: _MaxBuiltinIntegerType) { - _value = Builtin.s_to_s_checked_trunc_IntLiteral_Int64(x).0 - } - - public init(integerLiteral value: Int) 
{ - self = value - } - - public static func ==(_ lhs: Int, rhs: Int) -> Bool { - return Bool(Builtin.cmp_eq_Int64(lhs._value, rhs._value)) - } -} - -// Test ownership of multi-case Enum values in the context of to @in thunks. -// --- -// CHECK-LABEL: sil shared [transparent] [serialized] [thunk] [ossa] @$ss17FloatingPointSignOSQsSQ2eeoiySbx_xtFZTW : -// CHECK: bb0(%0 : $FloatingPointSign, %1 : $FloatingPointSign, %2 : $@thick FloatingPointSign.Type): -// CHECK: %3 = metatype $@thin FloatingPointSign.Type // user: %5 -// CHECK: %4 = function_ref @$ss17FloatingPointSignO21__derived_enum_equalsySbAB_ABtFZ : $@convention(method) (FloatingPointSign, FloatingPointSign, @thin FloatingPointSign.Type) -> Bool // user: %5 -// CHECK: %5 = apply %4(%0, %1, %3) : $@convention(method) (FloatingPointSign, FloatingPointSign, @thin FloatingPointSign.Type) -> Bool // user: %6 -// CHECK: return %5 : $Bool -// CHECK-LABEL: } // end sil function '$ss17FloatingPointSignOSQsSQ2eeoiySbx_xtFZTW' -public enum FloatingPointSign { - /// The sign for a positive value. - case plus - - /// The sign for a negative value. - case minus -} - -#if os(macOS) -// Test open_existential_value used in a conversion context. -// (the actual bridging call is dropped because we don't import Swift). 
-// --- -// CHECK-OSX-LABEL: sil [ossa] @$ss26_unsafeDowncastToAnyObject04fromD0yXlyp_tF : $@convention(thin) (@in_guaranteed Any) -> @owned AnyObject { -// CHECK-OSX: bb0(%0 : @guaranteed $Any): -// CHECK-OSX: [[COPY:%.*]] = copy_value %0 : $Any -// CHECK-OSX: [[BORROW2:%.*]] = begin_borrow [[COPY]] : $Any -// CHECK-OSX: [[VAL:%.*]] = open_existential_value [[BORROW2]] : $Any to $@opened -// CHECK-OSX: [[COPY2:%.*]] = copy_value [[VAL]] : $@opened -// CHECK-OSX: end_borrow [[BORROW2]] : $Any -// CHECK-OSX: destroy_value [[COPY2]] : $@opened -// CHECK-OSX: destroy_value [[COPY]] : $Any -// CHECK-OSX-NOT: destroy_value %0 : $Any -// CHECK-OSX: return undef : $AnyObject -// CHECK-OSX-LABEL: } // end sil function '$ss26_unsafeDowncastToAnyObject04fromD0yXlyp_tF' -public func _unsafeDowncastToAnyObject(fromAny any: Any) -> AnyObject { - return any as AnyObject -} -#endif - -public protocol Error {} - -#if os(macOS) -// Test open_existential_box_value in a conversion context. -// --- -// CHECK-OSX-LABEL: sil [ossa] @$ss3foo1eys5Error_pSg_tF : $@convention(thin) (@guaranteed Optional) -> () { -// CHECK-OSX: [[BORROW:%.*]] = begin_borrow [lexical] %{{.*}} : $Error -// CHECK-OSX: [[VAL:%.*]] = open_existential_box_value [[BORROW]] : $Error to $@opened -// CHECK-OSX: [[COPY:%.*]] = copy_value [[VAL]] : $@opened -// CHECK-OSX: [[ANY:%.*]] = init_existential_value [[COPY]] : $@opened -// CHECK-OSX: end_borrow [[BORROW]] : $Error -// CHECK-OSX-LABEL: } // end sil function '$ss3foo1eys5Error_pSg_tF' -public func foo(e: Error?) 
{ - if let u = e { - let a: Any = u - _ = a - } -} -#endif - -public enum Optional { - case none - case some(Wrapped) -} - -public protocol IP {} - -public protocol Seq { - associatedtype Iterator : IP - - func makeIterator() -> Iterator -} - -extension Seq where Self.Iterator == Self { - public func makeIterator() -> Self { - return self - } -} - -public struct EnumIter : IP, Seq { - internal var _base: Base - - public typealias Iterator = EnumIter -} - -// Test passing a +1 RValue to @in_guaranteed. -// --- -// CHECK-LABEL: sil [ossa] @$ss7EnumSeqV12makeIterators0A4IterVy0D0QzGyF : $@convention(method) (@in_guaranteed EnumSeq) -> @out EnumIter { -// CHECK: bb0(%0 : @guaranteed $EnumSeq): -// CHECK: [[MT:%.*]] = metatype $@thin EnumIter.Type -// CHECK: [[FIELD:%.*]] = struct_extract %0 : $EnumSeq, #EnumSeq._base -// CHECK: [[COPY:%.*]] = copy_value [[FIELD]] : $Base -// CHECK: [[WT:%.*]] = witness_method $Base, #Seq.makeIterator : (Self) -> () -> Self.Iterator : $@convention(witness_method: Seq) <τ_0_0 where τ_0_0 : Seq> (@in_guaranteed τ_0_0) -> @out τ_0_0.Iterator -// CHECK: [[ITER:%.*]] = apply [[WT]]([[COPY]]) : $@convention(witness_method: Seq) <τ_0_0 where τ_0_0 : Seq> (@in_guaranteed τ_0_0) -> @out τ_0_0.Iterator -// CHECK: destroy_value [[COPY]] : $Base -// CHECK: [[FN:%.*]] = function_ref @$ss8EnumIterV5_baseAByxGx_tcfC : $@convention(method) <τ_0_0 where τ_0_0 : IP> (@in τ_0_0, @thin EnumIter<τ_0_0>.Type) -> @out EnumIter<τ_0_0> -// CHECK: [[RET:%.*]] = apply [[FN]]([[ITER]], [[MT]]) : $@convention(method) <τ_0_0 where τ_0_0 : IP> (@in τ_0_0, @thin EnumIter<τ_0_0>.Type) -> @out EnumIter<τ_0_0> -// CHECK: return [[RET]] : $EnumIter -// CHECK-LABEL: } // end sil function '$ss7EnumSeqV12makeIterators0A4IterVy0D0QzGyF' -public struct EnumSeq : Seq { - public typealias Iterator = EnumIter - - internal var _base: Base - - public func makeIterator() -> Iterator { - return EnumIter(_base: _base.makeIterator()) - } -} diff --git 
a/test/SILGen/opaque_values_silgen.swift b/test/SILGen/opaque_values_silgen.swift index 3f95233c63f53..a9dc74bfa7d94 100644 --- a/test/SILGen/opaque_values_silgen.swift +++ b/test/SILGen/opaque_values_silgen.swift @@ -1,915 +1,157 @@ -// XFAIL: * +// RUN: %target-swift-emit-silgen -enable-sil-opaque-values -Xllvm -sil-full-demangle %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-runtime -// RUN: %target-swift-emit-silgen -enable-sil-opaque-values -emit-sorted-sil -Xllvm -sil-full-demangle %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-runtime +// Test SILGen -enable-sil-opaque-values with tests that depend on the stdlib. -struct TrivialStruct { - var x: Int -} - -protocol Foo { - func foo() -} - -protocol P { - var x : Int { get } -} - -protocol P2 : P {} - -extension TrivialStruct : P2 {} - -struct Box { - let t: T -} - -protocol EmptyP {} - -struct AddressOnlyStruct : EmptyP {} - -struct AnyStruct { - let a: Any -} - -protocol Clonable { - func maybeClone() -> Self? -} - -indirect enum IndirectEnum { - case Nil - case Node(T) -} - -protocol SubscriptableGet { - subscript(a : Int) -> Int { get } -} - -protocol SubscriptableGetSet { - subscript(a : Int) -> Int { get set } -} - -var subscriptableGet : SubscriptableGet -var subscriptableGetSet : SubscriptableGetSet - -class OpaqueClass { - typealias ObnoxiousTuple = (T, (T.Type, (T) -> T)) - - func inAndOut(x: T) -> T { return x } - func variantOptionalityTuples(x: ObnoxiousTuple) -> ObnoxiousTuple? { return x } -} - -class StillOpaqueClass: OpaqueClass { - override func variantOptionalityTuples(x: ObnoxiousTuple?) -> ObnoxiousTuple { return x! } -} - -class OpaqueTupleClass: OpaqueClass<(U, U)> { - override func inAndOut(x: (U, U)) -> (U, U) { return x } -} - -func unreachableF() -> (Int, T)? { } - -func s010_hasVarArg(_ args: Any...) 
{} - -// Tests Address only enums's construction -// CHECK-LABEL: sil shared [transparent] @$s20opaque_values_silgen15AddressOnlyEnumO4mereyAcA6EmptyP_pcACmF : $@convention(method) (@in EmptyP, @thin AddressOnlyEnum.Type) -> @out AddressOnlyEnum { -// CHECK: bb0([[ARG0:%.*]] : $EmptyP, [[ARG1:%.*]] : $@thin AddressOnlyEnum.Type): -// CHECK: [[RETVAL:%.*]] = enum $AddressOnlyEnum, #AddressOnlyEnum.mere!enumelt, [[ARG0]] : $EmptyP -// CHECK: return [[RETVAL]] : $AddressOnlyEnum -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen15AddressOnlyEnumO4mereyAcA6EmptyP_pcACmF' - -// CHECK-LABEL: sil shared [transparent] [thunk] @$s20opaque_values_silgen15AddressOnlyEnumO4mereyAcA6EmptyP_pcACmFTc : $@convention(thin) (@thin AddressOnlyEnum.Type) -> @owned @callee_guaranteed (@in_guaranteed EmptyP) -> @out AddressOnlyEnum { -// CHECK: bb0([[ARG:%.*]] : $@thin AddressOnlyEnum.Type): -// CHECK: [[RETVAL:%.*]] = partial_apply {{.*}}([[ARG]]) : $@convention(method) (@in EmptyP, @thin AddressOnlyEnum.Type) -> @out AddressOnlyEnum -// CHECK: [[CANONICAL_THUNK_FN:%.*]] = function_ref @$s20opaque_values_silgen6EmptyP_pAA15AddressOnlyEnumOIegir_AaB_pADIegnr_TR : $@convention(thin) (@in_guaranteed EmptyP, @guaranteed @callee_guaranteed (@in EmptyP) -> @out AddressOnlyEnum) -> @out AddressOnlyEnum -// CHECK: [[CANONICAL_THUNK:%.*]] = partial_apply [callee_guaranteed] [[CANONICAL_THUNK_FN]]([[RETVAL]]) -// CHECK: return [[CANONICAL_THUNK]] : $@callee_guaranteed (@in_guaranteed EmptyP) -> @out AddressOnlyEnum -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen15AddressOnlyEnumO4mereyAcA6EmptyP_pcACmFTc' -enum AddressOnlyEnum { - case nought - case mere(EmptyP) - case phantom(AddressOnlyStruct) -} - -// Test vtables - OpaqueTupleClass -// --- -// CHECK-LABEL: sil private @$s20opaque_values_silgen16OpaqueTupleClassC8inAndOut1xx_xtx_xt_tFAA0dF0CAdExx_tFTV : $@convention(method) (@in_guaranteed (U, U), @guaranteed OpaqueTupleClass) -> @out (U, U) { -// CHECK: 
bb0([[ARG0:%.*]] : $(U, U), [[ARG1:%.*]] : $OpaqueTupleClass): -// CHECK: ([[TELEM0:%.*]], [[TELEM1:%.*]]) = destructure_tuple [[ARG0]] : $(U, U) -// CHECK: [[APPLY:%.*]] = apply {{.*}}([[TELEM0]], [[TELEM1]], [[ARG1]]) : $@convention(method) <τ_0_0> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_0, @guaranteed OpaqueTupleClass<τ_0_0>) -> (@out τ_0_0, @out τ_0_0) -// CHECK: [[BORROWED_CALL:%.*]] = begin_borrow [[APPLY]] -// CHECK: [[BORROWED_CALL_EXT0:%.*]] = tuple_extract [[BORROWED_CALL]] : $(U, U), 0 -// CHECK: [[RETVAL0:%.*]] = copy_value [[BORROWED_CALL_EXT0]] : $U -// CHECK: [[BORROWED_CALL_EXT1:%.*]] = tuple_extract [[BORROWED_CALL]] : $(U, U), 1 -// CHECK: [[RETVAL1:%.*]] = copy_value [[BORROWED_CALL_EXT1]] : $U -// CHECK: end_borrow [[BORROWED_CALL]] -// CHECK: [[RETVAL:%.*]] = tuple ([[RETVAL0]] : $U, [[RETVAL1]] : $U) -// CHECK: return [[RETVAL]] -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen16OpaqueTupleClassC8inAndOut1xx_xtx_xt_tFAA0dF0CAdExx_tFTV' +// FIXME: "HECK" lines all need to be updated for OSSA. 
-// Test vtables - StillOpaqueClass -// --- -// CHECK-LABEL: sil private @$s20opaque_values_silgen16StillOpaqueClassC24variantOptionalityTuples1xx_xm_xxcttx_xm_xxcttSg_tFAA0eF0CAdeFx_xm_xxctt_tFTV : $@convention(method) (@in_guaranteed T, @thick T.Type, @guaranteed @callee_guaranteed (@in_guaranteed T) -> @out T, @guaranteed StillOpaqueClass) -> @out Optional<(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T))> { -// CHECK: bb0([[ARG0:%.*]] : $T, [[ARG1:%.*]] : $@thick T.Type, [[ARG2:%.*]] : $@callee_guaranteed (@in_guaranteed T) -> @out T, [[ARG3:%.*]] : $StillOpaqueClass): -// CHECK: [[TELEM0:%.*]] = tuple ([[ARG1]] : $@thick T.Type, [[ARG2]] : $@callee_guaranteed (@in_guaranteed T) -> @out T) -// CHECK: [[TELEM1:%.*]] = tuple ([[ARG0]] : $T, [[TELEM0]] : $(@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T)) -// CHECK: [[ENUMOPT0:%.*]] = enum $Optional<(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T))>, #Optional.some!enumelt, [[TELEM1]] : $(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T)) -// CHECK: [[APPLY:%.*]] = apply {{.*}}([[ENUMOPT0]], [[ARG3]]) : $@convention(method) <τ_0_0> (@in_guaranteed Optional<(τ_0_0, (@thick τ_0_0.Type, @callee_guaranteed (@in_guaranteed τ_0_0) -> @out τ_0_0))>, @guaranteed StillOpaqueClass<τ_0_0>) -> (@out τ_0_0, @thick τ_0_0.Type, @owned @callee_guaranteed (@in_guaranteed τ_0_0) -> @out τ_0_0) -// CHECK: [[BORROWED_T:%.*]] = begin_borrow [[APPLY]] -// CHECK: [[BORROWED_T_EXT0:%.*]] = tuple_extract [[BORROWED_T]] : $(T, @thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T), 0 -// CHECK: [[RETVAL0:%.*]] = copy_value [[BORROWED_T_EXT0]] -// CHECK: [[BORROWED_T_EXT1:%.*]] = tuple_extract [[BORROWED_T]] : $(T, @thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T), 1 -// CHECK: [[BORROWED_T_EXT2:%.*]] = tuple_extract [[BORROWED_T]] : $(T, @thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T), 2 -// CHECK: [[RETVAL1:%.*]] = copy_value 
[[BORROWED_T_EXT2]] -// CHECK: end_borrow [[BORROWED_T]] -// CHECK: [[RETTUPLE0:%.*]] = tuple ([[BORROWED_T_EXT1]] : $@thick T.Type, [[RETVAL1]] : $@callee_guaranteed (@in_guaranteed T) -> @out T) -// CHECK: [[RETTUPLE1:%.*]] = tuple ([[RETVAL0]] : $T, [[RETTUPLE0]] : $(@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T)) -// CHECK: [[RETVAL:%.*]] = enum $Optional<(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T))>, #Optional.some!enumelt, [[RETTUPLE1]] : $(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T)) -// CHECK: return [[RETVAL]] -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen16StillOpaqueClassC24variantOptionalityTuples1xx_xm_xxcttx_xm_xxcttSg_tFAA0eF0CAdeFx_xm_xxctt_tFTV' - - -// part of s280_convExistTrivial: conversion between existential types - reabstraction thunk -// --- -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @$s20opaque_values_silgen1P_pAA13TrivialStructVIegnd_AA2P2_pAaE_pIegnr_TR : $@convention(thin) (@in_guaranteed P2, @guaranteed @callee_guaranteed (@in_guaranteed P) -> TrivialStruct) -> @out P2 { -// CHECK: bb0([[ARG0:%.*]] : $P2, [[ARG1:%.*]] : $@callee_guaranteed (@in_guaranteed P) -> TrivialStruct): -// CHECK: [[OPENED_ARG:%.*]] = open_existential_value [[ARG]] : $P2 to $@opened({{.*}}) P2 -// CHECK: [[COPIED_VAL:%.*]] = copy_value [[OPENED_ARG]] -// CHECK: [[INIT_P:%.*]] = init_existential_value [[COPIED_VAL]] : $@opened({{.*}}) P2, $@opened({{.*}}) P2, $P -// CHECK: [[BORROWED_INIT_P:%.*]] = begin_borrow [[INIT_P]] -// CHECK: [[APPLY_P:%.*]] = apply [[ARG1]]([[BORROWED_INIT_P]]) : $@callee_guaranteed (@in_guaranteed P) -> TrivialStruct -// CHECK: [[RETVAL:%.*]] = init_existential_value [[APPLY_P]] : $TrivialStruct, $TrivialStruct, $P2 -// CHECK: end_borrow [[BORROWED_INIT_P]] -// CHECK-NOT: destroy_value [[ARG0]] -// CHECK: return [[RETVAL]] : $P2 -// CHECK-LABEL: } // end sil function 
'$s20opaque_values_silgen1P_pAA13TrivialStructVIegnd_AA2P2_pAaE_pIegnr_TR' +func genericInout(_: inout T) {} -// part of s290_convOptExistTriv: conversion between existential types - reabstraction thunk - optionals case -// --- -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @$s20opaque_values_silgen1P_pSgAA13TrivialStructVIegnd_AESgAA2P2_pIegyr_TR : $@convention(thin) (Optional, @guaranteed @callee_guaranteed (@in_guaranteed Optional

) -> TrivialStruct) -> @out P2 { -// CHECK: bb0([[ARG0:%.*]] : $Optional, [[ARG1:%.*]] : $@callee_guaranteed (@in_guaranteed Optional

) -> TrivialStruct): -// CHECK: switch_enum [[ARG0]] : $Optional, case #Optional.some!enumelt: bb2, case #Optional.none!enumelt: bb1 -// CHECK: bb1: -// CHECK: [[ONONE:%.*]] = enum $Optional

, #Optional.none!enumelt -// CHECK: br bb3([[ONONE]] : $Optional

) -// CHECK: bb2([[OSOME:%.*]] : $TrivialStruct): -// CHECK: [[INIT_S:%.*]] = init_existential_value [[OSOME]] : $TrivialStruct, $TrivialStruct, $P -// CHECK: [[ENUM_S:%.*]] = enum $Optional

, #Optional.some!enumelt, [[INIT_S]] : $P -// CHECK: br bb3([[ENUM_S]] : $Optional

) -// CHECK: bb3([[OPT_S:%.*]] : $Optional

): -// CHECK: [[BORROWED_OPT_S:%.*]] = begin_borrow [[OPT_S]] -// CHECK: [[APPLY_P:%.*]] = apply [[ARG1]]([[BORROWED_OPT_S]]) : $@callee_guaranteed (@in_guaranteed Optional

) -> TrivialStruct -// CHECK: [[RETVAL:%.*]] = init_existential_value [[APPLY_P]] : $TrivialStruct, $TrivialStruct, $P2 -// CHECK: return [[RETVAL]] : $P2 -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen1P_pSgAA13TrivialStructVIegnd_AESgAA2P2_pIegyr_TR' +func hasVarArg(_ args: Any...) {} // Test array initialization - we are still (somewhat) using addresses // --- -// CHECK-LABEL: sil @$s20opaque_values_silgen21s020_______callVarArgyyF : $@convention(thin) () -> () { -// CHECK: %[[APY:.*]] = apply %{{.*}}(%{{.*}}) : $@convention(thin) <τ_0_0> (Builtin.Word) -> (@owned Array<τ_0_0>, Builtin.RawPointer) -// CHECK: %[[BRW:.*]] = begin_borrow %[[APY]] -// CHECK: %[[TPL:.*]] = tuple_extract %[[BRW]] : $(Array, Builtin.RawPointer), 1 -// CHECK: end_borrow %[[BRW]] : $(Array, Builtin.RawPointer) -// CHECK: destroy_value %[[APY]] -// CHECK: %[[PTR:.*]] = pointer_to_address %[[TPL]] : $Builtin.RawPointer to [strict] $*Any -// CHECK: [[IOPAQUE:%.*]] = init_existential_value %{{.*}} : $Int, $Int, $Any -// CHECK: store [[IOPAQUE]] to [init] %[[PTR]] : $*Any -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s020_______callVarArgyyF' -public func s020_______callVarArg() { - s010_hasVarArg(3) -} - -// Test emitSemanticStore. 
-// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s030______assigninoutyyxz_xtlF : $@convention(thin) (@inout T, @in_guaranteed T) -> () { -// CHECK: bb0([[ARG0:%.*]] : $*T, [[ARG1:%.*]] : $T): -// CHECK: [[CPY:%.*]] = copy_value [[ARG1]] : $T -// CHECK: [[READ:%.*]] = begin_access [modify] [unknown] [[ARG0]] : $*T -// CHECK: assign [[CPY]] to [[READ]] : $*T -// CHECK-NOT: destroy_value [[ARG1]] : $T -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s030______assigninoutyyxz_xtlF' -func s030______assigninout(_ a: inout T, _ b: T) { - a = b -} - -// Test that we no longer use copy_addr or tuple_element_addr when copy by value is possible -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s040___tupleReturnIntyS2i_xt_tlF : $@convention(thin) (Int, @in_guaranteed T) -> Int { -// CHECK: bb0([[ARG0:%.*]] : $Int, [[ARG1:%.*]] : $T): -// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]] -// CHECK: [[TPL:%.*]] = tuple ([[ARG0]] : $Int, [[ARG1_COPY]] : $T) -// CHECK: [[BORROWED_ARG1:%.*]] = begin_borrow [[TPL]] : $(Int, T) -// CHECK: [[CPY:%.*]] = copy_value [[BORROWED_ARG1]] : $(Int, T) -// CHECK: [[BORROWED_CPY:%.*]] = begin_borrow [[CPY]] -// CHECK: [[INT:%.*]] = tuple_extract [[BORROWED_CPY]] : $(Int, T), 0 -// CHECK: [[GEN:%.*]] = tuple_extract [[BORROWED_CPY]] : $(Int, T), 1 -// CHECK: [[COPY_GEN:%.*]] = copy_value [[GEN]] -// CHECK: destroy_value [[COPY_GEN]] -// CHECK: end_borrow [[BORROWED_CPY]] -// CHECK: destroy_value [[CPY]] -// CHECK: end_borrow [[BORROWED_ARG1]] : $(Int, T) -// CHECK: destroy_value [[TPL]] : $(Int, T) -// CHECK: return [[INT]] -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s040___tupleReturnIntyS2i_xt_tlF' -func s040___tupleReturnInt(_ x: (Int, T)) -> Int { - let y = x.0 - return y -} - -// Test returning an opaque tuple of tuples. 
-// --- -// CHECK-LABEL: sil hidden [noinline] @$s20opaque_values_silgen21s050______multiResultyx_x_xttxlF : $@convention(thin) (@in_guaranteed T) -> (@out T, @out T, @out T) { -// CHECK: bb0(%0 : $T): -// CHECK: %[[CP1:.*]] = copy_value %{{.*}} : $T -// CHECK: %[[CP2:.*]] = copy_value %{{.*}} : $T -// CHECK: %[[CP3:.*]] = copy_value %{{.*}} : $T -// CHECK-NOT: destroy_value %0 : $T -// CHECK: %[[TPL:.*]] = tuple (%[[CP1]] : $T, %[[CP2]] : $T, %[[CP3]] : $T) -// CHECK: return %[[TPL]] : $(T, T, T) -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s050______multiResultyx_x_xttxlF' -@inline(never) -func s050______multiResult(_ t: T) -> (T, (T, T)) { - return (t, (t, t)) -} - -// Test returning an opaque tuple of tuples as a concrete tuple. -// --- -// CHECK-LABEL: sil @$s20opaque_values_silgen21s060__callMultiResult1iSi_Si_SittSi_tF : $@convention(thin) (Int) -> (Int, Int, Int) { -// CHECK: bb0(%0 : $Int): -// CHECK: %[[FN:.*]] = function_ref @$s20opaque_values_silgen21s050______multiResultyx_x_xttxlF : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) -// CHECK: %[[TPL:.*]] = apply %[[FN]](%0) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) -// CHECK: %[[I1:.*]] = tuple_extract %[[TPL]] : $(Int, Int, Int), 0 -// CHECK: %[[I2:.*]] = tuple_extract %[[TPL]] : $(Int, Int, Int), 1 -// CHECK: %[[I3:.*]] = tuple_extract %[[TPL]] : $(Int, Int, Int), 2 -// CHECK: %[[R:.*]] = tuple (%[[I1]] : $Int, %[[I2]] : $Int, %[[I3]] : $Int) -// CHECK: return %[[R]] : $(Int, Int, Int) -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s060__callMultiResult1iSi_Si_SittSi_tF' -public func s060__callMultiResult(i: Int) -> (Int, (Int, Int)) { - return s050______multiResult(i) -} - -// SILGen, prepareArchetypeCallee. Materialize a -// non-class-constrainted self from a class-constrained archetype. 
-// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s070__materializeSelf1tyx_tRlzCAA3FooRzlF : $@convention(thin) (@guaranteed T) -> () { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK: [[WITNESS_METHOD:%.*]] = witness_method $T, #Foo.foo : (Self) -> () -> () : $@convention(witness_method: Foo) <τ_0_0 where τ_0_0 : Foo> (@in_guaranteed τ_0_0) -> () -// CHECK: apply [[WITNESS_METHOD]]([[ARG]]) : $@convention(witness_method: Foo) <τ_0_0 where τ_0_0 : Foo> (@in_guaranteed τ_0_0) -> () -// CHECK-NOT: destroy_value [[ARG]] : $T -// CHECK: return %{{[0-9]+}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s070__materializeSelf1tyx_tRlzCAA3FooRzlF' -func s070__materializeSelf(t: T) where T: AnyObject { - t.foo() -} - -// Test open existential with opaque values -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s080______________bar1pSiAA1P_p_tF : $@convention(thin) (@in_guaranteed P) -> Int { -// CHECK: bb0([[ARG:%.*]] : $P): -// CHECK: [[OPENED_ARG:%.*]] = open_existential_value [[ARG]] : $P to $@opened -// CHECK: [[WITNESS_FUNC:%.*]] = witness_method $@opened -// CHECK: [[RESULT:%.*]] = apply [[WITNESS_FUNC]]<{{.*}}>([[OPENED_ARG]]) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> Int -// CHECK-NOT: destroy_value [[ARG]] : $P -// CHECK: return [[RESULT]] : $Int -func s080______________bar(p: P) -> Int { - return p.x -} - -// Test OpaqueTypeLowering copyValue and destroyValue. 
-// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s090___________calleryxxlF : $@convention(thin) (@in_guaranteed T) -> @out T { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK-NOT: copy_value -// CHECK: [[RESULT:%.*]] = apply {{%.*}}([[ARG]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> @out τ_0_0 -// CHECK-NOT: destroy_value [[ARG]] : $T -// CHECK: return %{{.*}} : $T -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s090___________calleryxxlF' -func s090___________caller(_ t: T) -> T { - return s090___________caller(t) -} - -// Test a simple opaque parameter and return value. -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s100_________identityyxxlF : $@convention(thin) (@in_guaranteed T) -> @out T { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] : $T -// CHECK-NOT: destroy_value [[ARG]] : $T -// CHECK: return [[COPY_ARG]] : $T -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s100_________identityyxxlF' -func s100_________identity(_ t: T) -> T { - return t -} - -// Test a guaranteed opaque parameter. 
-// --- -// CHECK-LABEL: sil private [transparent] [thunk] @$s20opaque_values_silgen21s110___GuaranteedSelfVAA3FooA2aDP3fooyyFTW : $@convention(witness_method: Foo) (@in_guaranteed s110___GuaranteedSelf) -> () { -// CHECK: bb0(%0 : $s110___GuaranteedSelf): -// CHECK: %[[F:.*]] = function_ref @$s20opaque_values_silgen21s110___GuaranteedSelfV3fooyyF : $@convention(method) (s110___GuaranteedSelf) -> () -// CHECK: apply %[[F]](%0) : $@convention(method) (s110___GuaranteedSelf) -> () -// CHECK: return -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s110___GuaranteedSelfVAA3FooA2aDP3fooyyFTW' -struct s110___GuaranteedSelf : Foo { - func foo() {} -} - -// Tests a corner case wherein we used to do a temporary and return a pointer to T instead of T -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s120______returnValueyxxlF : $@convention(thin) (@in_guaranteed T) -> @out T { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK: [[COPY_ARG1:%.*]] = copy_value [[ARG]] : $T -// CHECK: [[BORROWED_ARG2:%.*]] = begin_borrow [[COPY_ARG1]] -// CHECK: [[COPY_ARG2:%.*]] = copy_value [[BORROWED_ARG2]] : $T -// CHECK: end_borrow [[BORROWED_ARG2]] -// CHECK: return [[COPY_ARG2]] : $T -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s120______returnValueyxxlF' -func s120______returnValue(_ x: T) -> T { - let y = x - return y -} - -// Tests Optional initialization by value -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s130_____________wrapyxSgxlF : $@convention(thin) (@in_guaranteed T) -> @out Optional { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] : $T -// CHECK: [[OPTIONAL_ARG:%.*]] = enum $Optional, #Optional.some!enumelt, [[COPY_ARG]] : $T -// CHECK-NOT: destroy_value [[ARG]] : $T -// CHECK: return [[OPTIONAL_ARG]] : $Optional -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s130_____________wrapyxSgxlF' -func s130_____________wrap(_ x: T) -> T? 
{ - return x +// CHECK-LABEL: sil [ossa] @$s20opaque_values_silgen10callVarArgyyF : $@convention(thin) () -> () { +// HECK: %[[APY:.*]] = apply %{{.*}}(%{{.*}}) : $@convention(thin) <τ_0_0> (Builtin.Word) -> (@owned Array<τ_0_0>, Builtin.RawPointer) +// HECK: %[[BRW:.*]] = begin_borrow %[[APY]] +// HECK: %[[TPL:.*]] = tuple_extract %[[BRW]] : $(Array, Builtin.RawPointer), 1 +// HECK: end_borrow %[[BRW]] : $(Array, Builtin.RawPointer) +// HECK: destroy_value %[[APY]] +// HECK: %[[PTR:.*]] = pointer_to_address %[[TPL]] : $Builtin.RawPointer to [strict] $*Any +// HECK: [[IOPAQUE:%.*]] = init_existential_value %{{.*}} : $Int, $Int, $Any +// HECK: store [[IOPAQUE]] to [init] %[[PTR]] : $*Any +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen10callVarArgyyF' +public func callVarArg() { + hasVarArg(3) } // Tests For-each statements // --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s140______forEachStmtyyF : $@convention(thin) () -> () { -// CHECK: bb0: -// CHECK: [[PROJ_BOX_ARG:%.*]] = project_box %{{.*}} : ${ var IndexingIterator> } -// CHECK: [[APPLY_ARG1:%.*]] = apply -// CHECK-NOT: alloc_stack $Int -// CHECK-NOT: store [[APPLY_ARG1]] to [trivial] -// CHECK-NOT: alloc_stack $Range -// CHECK-NOT: dealloc_stack -// CHECK: [[APPLY_ARG2:%.*]] = apply %{{.*}}> -// CHECK: store [[APPLY_ARG2]] to [trivial] [[PROJ_BOX_ARG]] -// CHECK: br bb1 -// CHECK: bb1: +// CHECK-LABEL: sil hidden [ossa] @$s20opaque_values_silgen11forEachStmtyyF : $@convention(thin) () -> () { +// HECK: bb0: +// HECK: [[PROJ_BOX_ARG:%.*]] = project_box %{{.*}} : ${ var IndexingIterator> } +// HECK: [[APPLY_ARG1:%.*]] = apply +// HECK-NOT: alloc_stack $Int +// HECK-NOT: store [[APPLY_ARG1]] to [trivial] +// HECK-NOT: alloc_stack $Range +// HECK-NOT: dealloc_stack +// HECK: [[APPLY_ARG2:%.*]] = apply %{{.*}}> +// HECK: store [[APPLY_ARG2]] to [trivial] [[PROJ_BOX_ARG]] +// HECK: br bb1 +// HECK: bb1: // CHECK-NOT: alloc_stack $Optional -// CHECK: 
[[APPLY_ARG3:%.*]] = apply %{{.*}}> +// HECK: [[APPLY_ARG3:%.*]] = apply %{{.*}}> // CHECK-NOT: dealloc_stack -// CHECK: switch_enum [[APPLY_ARG3]] -// CHECK: bb2: -// CHECK: br bb3 -// CHECK: bb3: -// CHECK: return %{{.*}} : $() -// CHECK: bb4([[ENUM_ARG:%.*]] : $Int): +// HECK: switch_enum [[APPLY_ARG3]] +// HECK: bb2: +// HECK: br bb3 +// HECK: bb3: +// HECK: return %{{.*}} : $() +// HECK: bb4([[ENUM_ARG:%.*]] : $Int): // CHECK-NOT: unchecked_enum_data -// CHECK: br bb1 -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s140______forEachStmtyyF' -func s140______forEachStmt() { +// HECK: br bb1 +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen11forEachStmtyyF' +func forEachStmt() { for _ in 1..<42 { } } -func s150___________anyArg(_: Any) {} - -// Tests init of opaque existentials -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s160_______callAnyArgyyF : $@convention(thin) () -> () { -// CHECK: bb0: -// CHECK: [[INT_TYPE:%.*]] = metatype $@thin Int.Type -// CHECK: [[INT_LIT:%.*]] = integer_literal $Builtin.IntLiteral, 42 -// CHECK: [[INT_ARG:%.*]] = apply %{{.*}}([[INT_LIT]], [[INT_TYPE]]) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int -// CHECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[INT_ARG]] : $Int, $Int, $Any -// CHECK: apply %{{.*}}([[INIT_OPAQUE]]) : $@convention(thin) (@in_guaranteed Any) -> () -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s160_______callAnyArgyyF' -func s160_______callAnyArg() { - s150___________anyArg(42) -} - -// Tests unconditional_checked_cast for opaque values -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s170____force_convertxylF : $@convention(thin) () -> @out T { -// CHECK: bb0: -// CHECK-NOT: alloc_stack -// CHECK: [[INT_TYPE:%.*]] = metatype $@thin Int.Type -// CHECK: [[INT_LIT:%.*]] = integer_literal $Builtin.IntLiteral, 42 -// CHECK: [[INT_ARG:%.*]] = apply %{{.*}}([[INT_LIT]], 
[[INT_TYPE]]) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int -// CHECK: [[INT_CAST:%.*]] = unconditional_checked_cast_value [[INT_ARG]] : $Int to $T -// CHECK: [[CAST_BORROW:%.*]] = begin_borrow [[INT_CAST]] : $T -// CHECK: [[RETURN_VAL:%.*]] = copy_value [[CAST_BORROW]] : $T -// CHECK: end_borrow [[CAST_BORROW]] : $T -// CHECK: destroy_value [[INT_CAST]] : $T -// CHECK: return [[RETURN_VAL]] : $T -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s170____force_convertxylF' -func s170____force_convert() -> T { - let x : T = 42 as! T - return x -} - -// Tests supporting function for s190___return_foo_var - cast and return of protocol -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s180_______return_fooAA3Foo_pyF : $@convention(thin) () -> @out Foo { -// CHECK: bb0: -// CHECK: [[INT_LIT:%.*]] = integer_literal $Builtin.IntLiteral, 42 -// CHECK: [[INT_ARG:%.*]] = apply %{{.*}}([[INT_LIT]], [[INT_TYPE]]) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int -// CHECK: [[INT_CAST:%.*]] = unconditional_checked_cast_value [[INT_ARG]] : $Int to $Foo -// CHECK: return [[INT_CAST]] : $Foo -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s180_______return_fooAA3Foo_pyF' -func s180_______return_foo() -> Foo { - return 42 as! 
Foo -} -var foo_var : Foo = s180_______return_foo() - -// Tests return of global variables by doing a load of copy -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s190___return_foo_varAA3Foo_pyF : $@convention(thin) () -> @out Foo { -// CHECK: bb0: -// CHECK: [[GLOBAL:%.*]] = global_addr {{.*}} : $*Foo -// CHECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL]] : $*Foo -// CHECK: [[LOAD_GLOBAL:%.*]] = load [copy] [[READ]] : $*Foo -// CHECK: return [[LOAD_GLOBAL]] : $Foo -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s190___return_foo_varAA3Foo_pyF' -func s190___return_foo_var() -> Foo { - return foo_var -} - -// Tests deinit of opaque existentials -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s200______use_foo_varyyF : $@convention(thin) () -> () { -// CHECK: bb0: -// CHECK: [[GLOBAL:%.*]] = global_addr {{.*}} : $*Foo -// CHECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL]] : $*Foo -// CHECK: [[LOAD_GLOBAL:%.*]] = load [copy] [[READ]] : $*Foo -// CHECK: [[BORROW:%.*]] = begin_borrow [[LOAD_GLOBAL]] : $Foo -// CHECK: [[OPEN_VAR:%.*]] = open_existential_value [[BORROW]] : $Foo -// CHECK: [[WITNESS:%.*]] = witness_method $@opened -// CHECK: apply [[WITNESS]] -// CHECK: end_borrow [[BORROW]] -// CHECK: destroy_value [[LOAD_GLOBAL]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s200______use_foo_varyyF' -func s200______use_foo_var() { - foo_var.foo() -} - -// Tests composition erasure of opaque existentials + copy into of opaques -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s210______compErasureys5Error_psAC_AA3FoopF : $@convention(thin) (@in_guaranteed Error & Foo) -> @owned Error { -// CHECK: bb0([[ARG:%.*]] : $Error & Foo): -// CHECK: [[OPAQUE_ARG:%.*]] = open_existential_value [[ARG]] : $Error & Foo to $@opened({{.*}}) Error & Foo -// CHECK: [[EXIST_BOX:%.*]] = alloc_existential_box $Error, $@opened({{.*}}) Error & Foo -// CHECK: 
[[PROJ_BOX:%.*]] = project_existential_box $@opened({{.*}}) Error & Foo in [[EXIST_BOX]] -// CHECK: [[COPY_OPAQUE:%.*]] = copy_value [[OPAQUE_ARG]] : $@opened({{.*}}) Error & Foo -// CHECK: store [[COPY_OPAQUE]] to [init] [[PROJ_BOX]] : $*@opened({{.*}}) Error & Foo -// CHECK-NOT: destroy_value [[ARG]] : $Error & Foo -// CHECK: return [[EXIST_BOX]] : $Error -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s210______compErasureys5Error_psAC_AA3FoopF' -func s210______compErasure(_ x: Foo & Error) -> Error { - return x -} - // Tests that existential boxes can contain opaque types // --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s220_____openExistBoxySSs5Error_pF : $@convention(thin) (@guaranteed Error) -> @owned String { -// CHECK: bb0([[ARG:%.*]] : $Error): -// CHECK: [[OPAQUE_ARG:%.*]] = open_existential_box_value [[ARG]] : $Error to $@opened({{.*}}) Error -// CHECK: [[ALLOC_OPEN:%.*]] = alloc_stack $@opened({{.*}}) Error -// CHECK: store_borrow [[OPAQUE_ARG]] to [[ALLOC_OPEN]] -// CHECK: dealloc_stack [[ALLOC_OPEN]] +// CHECK-LABEL: sil hidden [ossa] @$s20opaque_values_silgen12openExistBoxySSs5Error_pF : $@convention(thin) (@guaranteed Error) -> @owned String { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Error): +// HECK: [[OPAQUE_ARG:%.*]] = open_existential_box_value [[ARG]] : $Error to $@opened({{.*}}) Error +// HECK: [[ALLOC_OPEN:%.*]] = alloc_stack $@opened({{.*}}) Error +// HECK: store_borrow [[OPAQUE_ARG]] to [[ALLOC_OPEN]] +// HECK: dealloc_stack [[ALLOC_OPEN]] // CHECK-NOT: destroy_value [[ARG]] : $Error -// CHECK: return {{.*}} : $String -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s220_____openExistBoxySSs5Error_pF' -func s220_____openExistBox(_ x: Error) -> String { +// HECK: return {{.*}} : $String +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen12openExistBoxySSs5Error_pF' +func openExistBox(_ x: Error) -> String { return x._domain } // Tests conditional value casts and correspondingly 
generated reabstraction thunk // --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s230______condFromAnyyyypF : $@convention(thin) (@in_guaranteed Any) -> () { -// CHECK: bb0([[ARG:%.*]] : $Any): -// CHECK: [[COPY__ARG:%.*]] = copy_value [[ARG]] -// CHECK: checked_cast_value_br [[COPY__ARG]] : $Any to $@callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int), bb2, bb1 -// CHECK: bb2([[THUNK_PARAM:%.*]] : $@callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int)): -// CHECK: [[THUNK_REF:%.*]] = function_ref @{{.*}} : $@convention(thin) (Int, Int, Int, Int, Int, @guaranteed @callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int)) -> (Int, Int, Int, Int, Int) -// CHECK: partial_apply [callee_guaranteed] [[THUNK_REF]]([[THUNK_PARAM]]) -// CHECK: bb6: -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s230______condFromAnyyyypF' -func s230______condFromAny(_ x: Any) { +// CHECK-LABEL: sil hidden [ossa] @$s20opaque_values_silgen11condFromAnyyyypF : $@convention(thin) (@in_guaranteed Any) -> () { +// HECK: bb0([[ARG:%.*]] : $Any): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: checked_cast_br [[COPY_ARG]] : $Any to $@callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int), bb2, bb1 +// HECK: bb2([[THUNK_PARAM:%.*]] : $@callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int)): +// HECK: [[THUNK_REF:%.*]] = function_ref @{{.*}} : $@convention(thin) (Int, Int, Int, Int, Int, @guaranteed @callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int)) -> (Int, Int, Int, Int, Int) +// HECK: partial_apply [callee_guaranteed] [[THUNK_REF]]([[THUNK_PARAM]]) +// HECK: bb6: +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function 
'$s20opaque_values_silgen11condFromAnyyyypF' +func condFromAny(_ x: Any) { if let f = x as? (Int, (Int, (Int, Int)), Int) -> (Int, (Int, (Int, Int)), Int) { _ = f(24, (4,(2, 42)), 42) } } -// Tests LValue of error types / existential boxes -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s240_____propOfLValueySSs5Error_pF : $@convention(thin) (@guaranteed Error) -> @owned String { -// CHECK: bb0([[ARG:%.*]] : $Error): -// CHECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var Error } -// CHECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: store [[COPY_ARG]] to [init] [[PROJ_BOX]] -// CHECK: [[READ:%.*]] = begin_access [read] [unknown] [[PROJ_BOX]] : $*Error -// CHECK: [[LOAD_BOX:%.*]] = load [copy] [[READ]] -// CHECK: [[OPAQUE_ARG:%.*]] = open_existential_box [[LOAD_BOX]] : $Error to $*@opened({{.*}}) Error -// CHECK: [[LOAD_OPAQUE:%.*]] = load [copy] [[OPAQUE_ARG]] -// CHECK: [[ALLOC_OPEN:%.*]] = alloc_stack $@opened({{.*}}) Error -// CHECK: store [[LOAD_OPAQUE]] to [init] [[ALLOC_OPEN]] -// CHECK: [[RET_VAL:%.*]] = apply {{.*}}<@opened({{.*}}) Error>([[ALLOC_OPEN]]) -// CHECK: return [[RET_VAL]] : $String -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s240_____propOfLValueySSs5Error_pF' -func s240_____propOfLValue(_ x: Error) -> String { - var x = x - return x._domain -} - -// Tests Implicit Value Construction under Opaque value mode -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s250_________testBoxTyyF : $@convention(thin) () -> () { -// CHECK: bb0: -// CHECK: [[BOX_MTYPE:%.*]] = metatype $@thin Box.Type -// CHECK: [[MTYPE:%.*]] = metatype $@thin Int.Type -// CHECK: [[INTLIT:%.*]] = integer_literal $Builtin.IntLiteral, 42 -// CHECK: [[AINT:%.*]] = apply {{.*}}([[INTLIT]], [[MTYPE]]) : $@convention(method) (Builtin.IntLiteral, @thin Int.Type) -> Int -// CHECK: apply {{.*}}([[AINT]], [[BOX_MTYPE]]) : $@convention(method) <τ_0_0> (@in τ_0_0, @thin 
Box<τ_0_0>.Type) -> @out Box<τ_0_0> -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s250_________testBoxTyyF' -func s250_________testBoxT() { - let _ = Box(t: 42) -} - -// Tests Address only enums -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s260_______AOnly_enumyyAA17AddressOnlyStructVF : $@convention(thin) (AddressOnlyStruct) -> () { -// CHECK: bb0([[ARG:%.*]] : $AddressOnlyStruct): -// CHECK: [[MTYPE1:%.*]] = metatype $@thin AddressOnlyEnum.Type -// CHECK: [[APPLY1:%.*]] = apply {{.*}}([[MTYPE1]]) : $@convention(thin) (@thin AddressOnlyEnum.Type) -> @owned @callee_guaranteed (@in_guaranteed EmptyP) -> @out AddressOnlyEnum -// CHECK: destroy_value [[APPLY1]] -// CHECK: [[MTYPE2:%.*]] = metatype $@thin AddressOnlyEnum.Type -// CHECK: [[ENUM1:%.*]] = enum $AddressOnlyEnum, #AddressOnlyEnum.nought!enumelt -// CHECK: [[MTYPE3:%.*]] = metatype $@thin AddressOnlyEnum.Type -// CHECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[ARG]] : $AddressOnlyStruct, $AddressOnlyStruct, $EmptyP -// CHECK: [[ENUM2:%.*]] = enum $AddressOnlyEnum, #AddressOnlyEnum.mere!enumelt, [[INIT_OPAQUE]] : $EmptyP -// CHECK: destroy_value [[ENUM2]] -// CHECK: [[MTYPE4:%.*]] = metatype $@thin AddressOnlyEnum.Type -// CHECK: [[ENUM3:%.*]] = enum $AddressOnlyEnum, #AddressOnlyEnum.phantom!enumelt, [[ARG]] : $AddressOnlyStruct -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s260_______AOnly_enumyyAA17AddressOnlyStructVF' -func s260_______AOnly_enum(_ s: AddressOnlyStruct) { - _ = AddressOnlyEnum.mere - - _ = AddressOnlyEnum.nought - - _ = AddressOnlyEnum.mere(s) - - _ = AddressOnlyEnum.phantom(s) -} - -// Tests InjectOptional for opaque value types + conversion of opaque structs -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s270_convOptAnyStructyyAA0gH0VADSgcF : $@convention(thin) (@guaranteed @callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct) -> () { 
-// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@in_guaranteed Optional, @guaranteed @callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct) -> @out Optional -// CHECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (@in_guaranteed Optional) -> @out Optional -// CHECK-NOT: destroy_value [[ARG]] : $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s270_convOptAnyStructyyAA0gH0VADSgcF' -func s270_convOptAnyStruct(_ a1: @escaping (AnyStruct?) -> AnyStruct) { - let _: (AnyStruct?) -> AnyStruct? = a1 -} - -// Tests conversion between existential types -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s280_convExistTrivialyyAA0G6StructVAA1P_pcF : $@convention(thin) (@guaranteed @callee_guaranteed (@in_guaranteed P) -> TrivialStruct) -> () { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed (@in_guaranteed P) -> TrivialStruct): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@in_guaranteed P2, @guaranteed @callee_guaranteed (@in_guaranteed P) -> TrivialStruct) -> @out P2 -// CHECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (@in_guaranteed P2) -> @out P2 -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s280_convExistTrivialyyAA0G6StructVAA1P_pcF' -func s280_convExistTrivial(_ s: @escaping (P) -> TrivialStruct) { - let _: (P2) -> P2 = s -} - -// Tests conversion between existential types - optionals case -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s290_convOptExistTrivyyAA13TrivialStructVAA1P_pSgcF : $@convention(thin) (@guaranteed 
@callee_guaranteed (@in_guaranteed Optional

) -> TrivialStruct) -> () { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed (@in_guaranteed Optional

) -> TrivialStruct): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (Optional, @guaranteed @callee_guaranteed (@in_guaranteed Optional

) -> TrivialStruct) -> @out P2 -// CHECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (Optional) -> @out P2 -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s290_convOptExistTrivyyAA13TrivialStructVAA1P_pSgcF' -func s290_convOptExistTriv(_ s: @escaping (P?) -> TrivialStruct) { - let _: (TrivialStruct?) -> P2 = s -} - -// Tests corner-case: reabstraction of an empty tuple to any -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s300__convETupleToAnyyyyycF : $@convention(thin) (@guaranteed @callee_guaranteed () -> ()) -> () { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed () -> ()): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@guaranteed @callee_guaranteed () -> ()) -> @out Any -// CHECK: destroy_value [[PAPPLY]] : $@callee_guaranteed () -> @out Any -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s300__convETupleToAnyyyyycF' -func s300__convETupleToAny(_ t: @escaping () -> ()) { - let _: () -> Any = t -} - -// Tests corner-case: reabstraction of a non-empty tuple to any -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s310__convIntTupleAnyyySi_SitycF : $@convention(thin) (@guaranteed @callee_guaranteed () -> (Int, Int)) -> () { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed () -> (Int, Int)): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@guaranteed @callee_guaranteed () -> (Int, Int)) -> @out Any -// CHECK: destroy_value [[PAPPLY]] : $@callee_guaranteed () -> @out Any -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s310__convIntTupleAnyyySi_SitycF' -func 
s310__convIntTupleAny(_ t: @escaping () -> (Int, Int)) { - let _: () -> Any = t -} - -// Tests translating and imploding into Any under opaque value mode -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s320__transImplodeAnyyyyypcF : $@convention(thin) (@guaranteed @callee_guaranteed (@in_guaranteed Any) -> ()) -> () { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed (@in_guaranteed Any) -> ()): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (Int, Int, @guaranteed @callee_guaranteed (@in_guaranteed Any) -> ()) -> () -// CHECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (Int, Int) -> () -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s320__transImplodeAnyyyyypcF' -func s320__transImplodeAny(_ t: @escaping (Any) -> ()) { - let _: ((Int, Int)) -> () = t -} - -// Tests support for address only let closures under opaque value mode - they are not by-address anymore -// --- -// CHECK-LABEL: sil private @$s20opaque_values_silgen21s330___addrLetClosureyxxlFxyXEfU_xyXEfU_ : $@convention(thin) (@in_guaranteed T) -> @out T { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] : $T -// CHECK: return [[COPY_ARG]] : $T -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s330___addrLetClosureyxxlFxyXEfU_xyXEfU_' -func s330___addrLetClosure(_ x:T) -> T { - return { { x }() }() -} - -// Tests support for capture of a mutable opaque value type +// Tests support for if statements for opaque value(s) under new mode // --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s340_______captureBoxyyF : $@convention(thin) () -> () { -// CHECK: bb0: -// CHECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var EmptyP }, var, name "mutableAddressOnly" -// CHECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] -// CHECK: [[APPLY_FOR_BOX:%.*]] = 
apply %{{.*}}(%{{.*}}) : $@convention(method) (@thin AddressOnlyStruct.Type) -> AddressOnlyStruct -// CHECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[APPLY_FOR_BOX]] : $AddressOnlyStruct, $AddressOnlyStruct, $EmptyP -// CHECK: store [[INIT_OPAQUE]] to [init] [[PROJ_BOX]] : $*EmptyP -// CHECK: [[BORROW_BOX:%.*]] = begin_borrow [[ALLOC_OF_BOX]] : ${ var EmptyP } -// CHECK: mark_function_escape [[PROJ_BOX]] : $*EmptyP -// CHECK: apply %{{.*}}([[BORROW_BOX]]) : $@convention(thin) (@guaranteed { var EmptyP }) -> () -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s340_______captureBoxyyF' -func s340_______captureBox() { - var mutableAddressOnly: EmptyP = AddressOnlyStruct() - - func captureEverything() { - _ = s100_________identity((mutableAddressOnly)) - } +protocol EmptyP {} - captureEverything() -} +struct AddressOnlyStruct : EmptyP {} -// Tests support for if statements for opaque value(s) under new mode -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s350_______addrOnlyIf1xAA6EmptyP_pSb_tF : $@convention(thin) (Bool) -> @out EmptyP { -// CHECK: bb0([[ARG:%.*]] : $Bool): -// CHECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var EmptyP }, var -// CHECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] -// CHECK: [[APPLY_FOR_BOX:%.*]] = apply %{{.*}}(%{{.*}}) : $@convention(method) (@thin AddressOnlyStruct.Type) -> AddressOnlyStruct -// CHECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[APPLY_FOR_BOX]] : $AddressOnlyStruct, $AddressOnlyStruct, $EmptyP -// CHECK: store [[INIT_OPAQUE]] to [init] [[PROJ_BOX]] : $*EmptyP -// CHECK: [[APPLY_FOR_BRANCH:%.*]] = apply %{{.*}}([[ARG]]) : $@convention(method) (Bool) -> Builtin.Int1 -// CHECK: cond_br [[APPLY_FOR_BRANCH]], bb2, bb1 -// CHECK: bb1: -// CHECK: [[READ:%.*]] = begin_access [read] [unknown] [[PROJ_BOX]] : $*EmptyP -// CHECK: [[RETVAL1:%.*]] = load [copy] [[READ]] : $*EmptyP -// CHECK: br bb3([[RETVAL1]] : $EmptyP) -// CHECK: bb2: -// CHECK: [[READ:%.*]] 
= begin_access [read] [unknown] [[PROJ_BOX]] : $*EmptyP -// CHECK: [[RETVAL2:%.*]] = load [copy] [[READ]] : $*EmptyP -// CHECK: br bb3([[RETVAL2]] : $EmptyP) -// CHECK: bb3([[RETVAL:%.*]] : $EmptyP): -// CHECK: destroy_value [[ALLOC_OF_BOX]] -// CHECK: return [[RETVAL]] : $EmptyP -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s350_______addrOnlyIf1xAA6EmptyP_pSb_tF' -func s350_______addrOnlyIf(x: Bool) -> EmptyP { +// CHECK-LABEL: sil hidden [ossa] @$s20opaque_values_silgen10addrOnlyIf1xAA6EmptyP_pSb_tF : $@convention(thin) (Bool) -> @out EmptyP { +// HECK: bb0([[ARG:%.*]] : $Bool): +// HECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var EmptyP }, var +// HECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] +// HECK: [[APPLY_FOR_BOX:%.*]] = apply %{{.*}}(%{{.*}}) : $@convention(method) (@thin AddressOnlyStruct.Type) -> AddressOnlyStruct +// HECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[APPLY_FOR_BOX]] : $AddressOnlyStruct, $AddressOnlyStruct, $EmptyP +// HECK: store [[INIT_OPAQUE]] to [init] [[PROJ_BOX]] : $*EmptyP +// HECK: [[APPLY_FOR_BRANCH:%.*]] = apply %{{.*}}([[ARG]]) : $@convention(method) (Bool) -> Builtin.Int1 +// HECK: cond_br [[APPLY_FOR_BRANCH]], bb2, bb1 +// HECK: bb1: +// HECK: [[READ:%.*]] = begin_access [read] [unknown] [[PROJ_BOX]] : $*EmptyP +// HECK: [[RETVAL1:%.*]] = load [copy] [[READ]] : $*EmptyP +// HECK: br bb3([[RETVAL1]] : $EmptyP) +// HECK: bb2: +// HECK: [[READ:%.*]] = begin_access [read] [unknown] [[PROJ_BOX]] : $*EmptyP +// HECK: [[RETVAL2:%.*]] = load [copy] [[READ]] : $*EmptyP +// HECK: br bb3([[RETVAL2]] : $EmptyP) +// HECK: bb3([[RETVAL:%.*]] : $EmptyP): +// HECK: destroy_value [[ALLOC_OF_BOX]] +// HECK: return [[RETVAL]] : $EmptyP +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen10addrOnlyIf1xAA6EmptyP_pSb_tF' +func addrOnlyIf(x: Bool) -> EmptyP { var a : EmptyP = AddressOnlyStruct() - + genericInout(&a) return x ? 
a : a } -// Tests support for guards and indirect enums for opaque values -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s360________guardEnumyyAA08IndirectF0OyxGlF : $@convention(thin) (@guaranteed IndirectEnum) -> () { -// CHECK: bb0([[ARG:%.*]] : $IndirectEnum): -// CHECK: [[COPY__ARG:%.*]] = copy_value [[ARG]] -// CHECK: switch_enum [[COPY__ARG]] : $IndirectEnum, case #IndirectEnum.Node!enumelt: [[NODE_BB:bb[0-9]+]], case #IndirectEnum.Nil!enumelt: [[NIL_BB:bb[0-9]+]] -// -// CHECK: [[NIL_BB]]: -// CHECK: br [[NIL_TRAMPOLINE:bb[0-9]+]] -// -// CHECK: [[NIL_TRAMPOLINE]]: -// CHECK: br [[EPILOG_BB:bb[0-9]+]] -// -// CHECK: [[NODE_BB]]([[EARG:%.*]] : $<τ_0_0> { var τ_0_0 } ): -// CHECK: [[PROJ_BOX:%.*]] = project_box [[EARG]] -// CHECK: [[LOAD_BOX:%.*]] = load [take] [[PROJ_BOX]] : $*T -// CHECK: [[COPY_BOX:%.*]] = copy_value [[LOAD_BOX]] : $T -// CHECK: destroy_value [[EARG]] -// CHECK: br [[CONT_BB:bb[0-9]+]] -// -// CHECK: [[CONT_BB]]: -// CHECK: destroy_value [[COPY_BOX]] -// CHECK: br [[EPILOG_BB]] -// -// CHECK: [[EPILOG_BB]]: -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s360________guardEnumyyAA08IndirectF0OyxGlF' -func s360________guardEnum(_ e: IndirectEnum) { - do { - guard case .Node(let x) = e else { return } - _ = x - } -} - -// Tests contextual init() of opaque value types -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s370_____optToOptCastyxSgAClF : $@convention(thin) (@in_guaranteed Optional) -> @out Optional { -// CHECK: bb0([[ARG:%.*]] : $Optional): -// CHECK: [[COPY__ARG:%.*]] = copy_value [[ARG]] -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return [[COPY__ARG]] : $Optional -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s370_____optToOptCastyxSgAClF' -func s370_____optToOptCast(_ x : T!) -> T? 
{ - return x -} - -// Tests casting optional opaques to optional opaques -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s380___contextualInityySiSgF : $@convention(thin) (Optional) -> () { -// CHECK: bb0([[ARG:%.*]] : $Optional): -// CHECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var Optional }, var -// CHECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] -// CHECK: store [[ARG]] to [trivial] [[PROJ_BOX]] : $*Optional -// CHECK: destroy_value [[ALLOC_OF_BOX]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s380___contextualInityySiSgF' -func s380___contextualInit(_ a : Int?) { - var x: Int! = a - _ = x -} - -// Tests opaque call result types -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s390___addrCallResultyyxycSglF : $@convention(thin) (@guaranteed Optional<@callee_guaranteed () -> @out T>) -> () { -// CHECK: bb0([[ARG:%.*]] : $Optional<@callee_guaranteed () -> @out T>): -// CHECK: [[ALLOC_OF_BOX:%.*]] = alloc_box $<τ_0_0> { var Optional<τ_0_0> } -// CHECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] -// CHECK: [[COPY__ARG:%.*]] = copy_value [[ARG]] -// CHECK: [[SENUM:%.*]] = select_enum [[COPY__ARG]] -// CHECK: cond_br [[SENUM]], bb3, bb1 -// CHECK: bb1: -// CHECK: br bb2 -// CHECK: bb2: -// CHECK: [[ONONE:%.*]] = enum $Optional, #Optional.none!enumelt -// CHECK: br bb4([[ONONE]] : $Optional) -// CHECK: bb4(%{{.*}} : $Optional): -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s390___addrCallResultyyxycSglF' -func s390___addrCallResult(_ f: (() -> T)?) 
{ - var x = f?() - _ = x -} - -// Tests reabstraction / partial apply of protocols under opaque value mode -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s400______maybeCloneP1cyAA8Clonable_p_tF : $@convention(thin) (@in_guaranteed Clonable) -> () { -// CHECK: bb0([[ARG:%.*]] : $Clonable): -// CHECK: [[OPEN_ARG:%.*]] = open_existential_value [[ARG]] : $Clonable -// CHECK: [[APPLY_OPAQUE:%.*]] = apply %{{.*}}<@opened({{.*}}) Clonable>([[OPEN_ARG]]) : $@convention(thin) <τ_0_0 where τ_0_0 : Clonable> (@in_guaranteed τ_0_0) -> @owned @callee_guaranteed () -> @out Optional<τ_0_0> -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}<@opened({{.*}}) Clonable>([[APPLY_OPAQUE]]) : $@convention(thin) <τ_0_0 where τ_0_0 : Clonable> (@guaranteed @callee_guaranteed () -> @out Optional<τ_0_0>) -> @out Optional -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s400______maybeCloneP1cyAA8Clonable_p_tF' -func s400______maybeCloneP(c: Clonable) { - let _: () -> Clonable? 
= c.maybeClone -} - -// Tests global opaque values / subscript rvalues -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s410__globalRvalueGetyS2iF : $@convention(thin) (Int) -> Int { -// CHECK: bb0([[ARG:%.*]] : $Int): -// CHECK: [[GLOBAL_ADDR:%.*]] = global_addr @$s20opaque_values_silgen16subscriptableGetAA013SubscriptableE0_pvp : $*SubscriptableGet -// CHECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL_ADDR]] : $*SubscriptableGet -// CHECK: [[OPEN_ARG:%.*]] = open_existential_addr immutable_access [[READ]] : $*SubscriptableGet to $*@opened -// CHECK: [[GET_OPAQUE:%.*]] = load [copy] [[OPEN_ARG]] : $*@opened -// CHECK: [[RETVAL:%.*]] = apply %{{.*}}<@opened({{.*}}) SubscriptableGet>([[ARG]], [[GET_OPAQUE]]) : $@convention(witness_method: SubscriptableGet) <τ_0_0 where τ_0_0 : SubscriptableGet> (Int, @in_guaranteed τ_0_0) -> Int -// CHECK: destroy_value [[GET_OPAQUE]] -// CHECK: return [[RETVAL]] : $Int -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s410__globalRvalueGetyS2iF' -func s410__globalRvalueGet(_ i : Int) -> Int { - return subscriptableGet[i] -} - -// Tests global opaque values / subscript lvalues -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s420__globalLvalueGetyS2iF : $@convention(thin) (Int) -> Int { -// CHECK: bb0([[ARG:%.*]] : $Int): -// CHECK: [[GLOBAL_ADDR:%.*]] = global_addr @$s20opaque_values_silgen19subscriptableGetSetAA013SubscriptableeF0_pvp : $*SubscriptableGetSet -// CHECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL_ADDR]] : $*SubscriptableGetSet -// CHECK: [[OPEN_ARG:%.*]] = open_existential_addr immutable_access [[READ]] : $*SubscriptableGetSet to $*@opened -// CHECK: [[GET_OPAQUE:%.*]] = load [copy] [[OPEN_ARG]] : $*@opened -// CHECK: [[RETVAL:%.*]] = apply %{{.*}}<@opened({{.*}}) SubscriptableGetSet>([[ARG]], [[GET_OPAQUE]]) : $@convention(witness_method: SubscriptableGetSet) <τ_0_0 where τ_0_0 : SubscriptableGetSet> (Int, @in_guaranteed τ_0_0) -> Int -// CHECK: 
destroy_value [[GET_OPAQUE]] -// CHECK: return [[RETVAL]] : $Int -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s420__globalLvalueGetyS2iF' -func s420__globalLvalueGet(_ i : Int) -> Int { - return subscriptableGetSet[i] -} - -// Tests tuple transformation -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s430_callUnreachableF1tyx_tlF : $@convention(thin) (@in_guaranteed T) -> () { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK: [[APPLY_T:%.*]] = apply %{{.*}}<((T) -> (), T)>() : $@convention(thin) <τ_0_0> () -> @out Optional<(Int, τ_0_0)> -// CHECK: switch_enum [[APPLY_T]] : $Optional<(Int, (@callee_guaranteed (@in_guaranteed T) -> @out (), T))>, case #Optional.some!enumelt: bb2, case #Optional.none!enumelt: bb1 -// CHECK: bb2([[ENUMARG:%.*]] : $(Int, (@callee_guaranteed (@in_guaranteed T) -> @out (), T))): -// CHECK: ([[TELEM0:%.*]], [[TELEM1:%.*]]) = destructure_tuple [[ENUMARG]] : $(Int, (@callee_guaranteed (@in_guaranteed T) -> @out (), T)) -// CHECK: ([[TELEM10:%.*]], [[TELEM11:%.*]]) = destructure_tuple [[TELEM1]] : $(@callee_guaranteed (@in_guaranteed T) -> @out (), T) -// CHECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[TELEM10]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0, @guaranteed @callee_guaranteed (@in_guaranteed τ_0_0) -> @out ()) -> () -// CHECK: [[NEWT0:%.*]] = tuple ([[PAPPLY]] : $@callee_guaranteed (@in_guaranteed T) -> (), [[TELEM11]] : $T) -// CHECK: [[NEWT1:%.*]] = tuple ([[TELEM0]] : $Int, [[NEWT0]] : $(@callee_guaranteed (@in_guaranteed T) -> (), T)) -// CHECK: [[NEWENUM:%.*]] = enum $Optional<(Int, (@callee_guaranteed (@in_guaranteed T) -> (), T))>, #Optional.some!enumelt, [[NEWT1]] : $(Int, (@callee_guaranteed (@in_guaranteed T) -> (), T)) -// CHECK: br bb3([[NEWENUM]] : $Optional<(Int, (@callee_guaranteed (@in_guaranteed T) -> (), T))>) -// CHECK: bb3([[ENUMIN:%.*]] : $Optional<(Int, (@callee_guaranteed (@in_guaranteed T) -> (), T))>): -// CHECK: destroy_value [[ENUMIN]] -// CHECK: 
return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s430_callUnreachableF1tyx_tlF' -func s430_callUnreachableF(t: T) { - let _: (Int, ((T) -> (), T))? = unreachableF() -} - -// Further testing for conditional checked cast under opaque value mode - make sure we don't create a buffer for results +// Tests LValue of error types / existential boxes // --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s440__cleanupEmissionyyxlF : $@convention(thin) (@in_guaranteed T) -> () { -// CHECK: bb0([[ARG:%.*]] : $T): -// CHECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] -// CHECK: checked_cast_value_br [[COPY_ARG]] : $T to $EmptyP, bb2, bb1 -// -// CHECK: bb2([[PTYPE:%.*]] : $EmptyP): -// CHECK: [[PSOME:%.*]] = enum $Optional, #Optional.some!enumelt, [[PTYPE]] : $EmptyP -// CHECK: br bb3([[PSOME]] : $Optional) -// -// CHECK: bb3([[ENUMRES:%.*]] : $Optional): -// CHECK: switch_enum [[ENUMRES]] : $Optional, case #Optional.some!enumelt: [[SOME_BB:bb[0-9]+]], case #Optional.none!enumelt: [[NONE_BB:bb[0-9]+]] -// -// CHECK: [[NONE_BB]]: -// CHECK: br [[NONE_TRAMPOLINE:bb[0-9]+]] -// -// CHECK: [[NONE_TRAMPOLINE]]: -// CHECK: br [[EPILOG_BB:bb[0-9]+]] -// -// CHECK: [[SOME_BB]]([[ENUMRES2:%.*]] : $EmptyP): -// CHECK: br [[CONT_BB:bb[0-9]+]] -// -// CHECK: [[CONT_BB]]: -// CHECK: destroy_value [[ENUMRES2]] -// CHECK: br [[EPILOG_BB]] -// -// CHECK: [[EPILOG_BB]]: -// CHECK-NOT: destroy_value [[ARG]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s440__cleanupEmissionyyxlF' -func s440__cleanupEmission(_ x: T) { - guard let x2 = x as? 
EmptyP else { return } - _ = x2 +// CHECK-LABEL: sil hidden [ossa] @$s20opaque_values_silgen12propOfLValueySSs5Error_pF : $@convention(thin) (@guaranteed Error) -> @owned String { +// HECK: bb0([[ARG:%.*]] : $Error): +// HECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var Error } +// HECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: store [[COPY_ARG]] to [init] [[PROJ_BOX]] +// HECK: [[READ:%.*]] = begin_access [read] [unknown] [[PROJ_BOX]] : $*Error +// HECK: [[LOAD_BOX:%.*]] = load [copy] [[READ]] +// HECK: [[OPAQUE_ARG:%.*]] = open_existential_box [[LOAD_BOX]] : $Error to $*@opened({{.*}}) Error +// HECK: [[LOAD_OPAQUE:%.*]] = load [copy] [[OPAQUE_ARG]] +// HECK: [[ALLOC_OPEN:%.*]] = alloc_stack $@opened({{.*}}) Error +// HECK: store [[LOAD_OPAQUE]] to [init] [[ALLOC_OPEN]] +// HECK: [[RET_VAL:%.*]] = apply {{.*}}<@opened({{.*}}) Error>([[ALLOC_OPEN]]) +// HECK: return [[RET_VAL]] : $String +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen12propOfLValueySSs5Error_pF' +func propOfLValue(_ x: Error) -> String { + var x = x + genericInout(&x) + return x._domain } // Test SILGenBuilder.loadCopy(). // --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s450__________lastValyxxd_tlF : $@convention(thin) (@guaranteed Array) -> @out T -// CHECK: [[LOAD:%.*]] = load [copy] %{{.*}} : $*T -// CHECK: return [[LOAD]] : $T -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s450__________lastValyxxd_tlF' -func s450__________lastVal(_ rest: T...) -> T { +// CHECK-LABEL: sil hidden [ossa] @$s20opaque_values_silgen7lastValyxxd_tlF : $@convention(thin) (@guaranteed Array) -> @out T { +// HECK: [[LOAD:%.*]] = load [copy] %{{.*}} : $*T +// HECK: return [[LOAD]] : $T +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen7lastValyxxd_tlF' +func lastVal(_ rest: T...) 
-> T { var minValue: T for value in rest { minValue = value @@ -919,269 +161,76 @@ func s450__________lastVal(_ rest: T...) -> T { // Test SILGenFunction::emitPointerToPointer. // --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s460______________foo1pSRyxGSPyxG_tlF : $@convention(thin) (UnsafePointer) -> UnsafeBufferPointer { -// CHECK: [[F:%.*]] = function_ref @$ss017_convertPointerToB8Argumentyq_xs01_B0RzsABR_r0_lF : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : _Pointer, τ_0_1 : _Pointer> (@in_guaranteed τ_0_0) -> @out τ_0_1 -// CHECK: apply [[F]], UnsafePointer>(%0) : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : _Pointer, τ_0_1 : _Pointer> (@in_guaranteed τ_0_0) -> @out τ_0_1 -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s460______________foo1pSRyxGSPyxG_tlF' -func s460______________foo(p: UnsafePointer) -> UnsafeBufferPointer { +// CHECK-LABEL: sil hidden [ossa] @$s20opaque_values_silgen3foo1pSRyxGSPyxG_tlF : $@convention(thin) (UnsafePointer) -> UnsafeBufferPointer { +// HECK: [[F:%.*]] = function_ref @$sconvertPointerToB8Argumentyq_xB0RzsABR_r0_lF : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : _Pointer, τ_0_1 : _Pointer> (@in_guaranteed τ_0_0) -> @out τ_0_1 +// HECK: apply [[F]], UnsafePointer>(%0) : $@convention(thin) <τ_0_0, τ_0_1 where τ_0_0 : _Pointer, τ_0_1 : _Pointer> (@in_guaranteed τ_0_0) -> @out τ_0_1 +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen3foo1pSRyxGSPyxG_tlF' +func foo(p: UnsafePointer) -> UnsafeBufferPointer { return UnsafeBufferPointer(start: p, count: 1) } -// Test emitNativeToCBridgedNonoptionalValue. 
-// --- -// CHECK-objc-LABEL: sil hidden @$s20opaque_values_silgen21s470________nativeToC7fromAnyyXlyp_tF : $@convention(thin) (@in_guaranteed Any) -> @owned AnyObject { -// CHECK-objc: bb0(%0 : $Any): -// CHECK-objc: [[BORROW:%.*]] = begin_borrow %0 : $Any -// CHECK-objc: [[SRC:%.*]] = copy_value [[BORROW]] : $Any -// CHECK-objc: [[OPEN:%.*]] = open_existential_opaque [[SRC]] : $Any to $@opened -// CHECK-objc: [[COPY:%.*]] = copy_value [[OPEN]] : $@opened -// CHECK-objc: [[F:%.*]] = function_ref @$ss27_bridgeAnythingToObjectiveCyyXlxlF : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> @owned AnyObject -// CHECK-objc: [[RET:%.*]] = apply [[F]]<@opened("{{.*}}") Any>([[COPY]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> @owned AnyObject -// CHECK-objc: destroy_value [[SRC]] : $Any -// CHECK-objc: destroy_value %0 : $Any -// CHECK-objc: return [[RET]] : $AnyObject -// CHECK-objc-LABEL: } // end sil function '$s20opaque_values_silgen21s470________nativeToC7fromAnyyXlyp_tF' -#if _runtime(_ObjC) -func s470________nativeToC(fromAny any: Any) -> AnyObject { - return any as AnyObject -} -#endif - -// Test emitOpenExistential. +// Test SILBuilder.createLoadBorrow. 
// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s480_________getError04someF0yps0F0_p_tF : $@convention(thin) (@guaranteed Error) -> @out Any { -// CHECK: bb0([[ARG:%.*]] : $Error): -// CHECK: [[VAL:%.*]] = open_existential_box_value [[ARG]] : $Error to $@opened("{{.*}}") Error -// CHECK: [[COPY:%.*]] = copy_value [[VAL]] : $@opened("{{.*}}") Error -// CHECK: [[ANY:%.*]] = init_existential_value [[COPY]] : $@opened("{{.*}}") Error, $@opened("{{.*}}") Error, $Any -// CHECK-NOT: destroy_value [[ARG]] : $Error -// CHECK: return [[ANY]] : $Any -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s480_________getError04someF0yps0F0_p_tF' -func s480_________getError(someError: Error) -> Any { - return someError +protocol FooP { + func foo() } -// Test SILBuilder.createLoadBorrow. -// --- -// CHECK-LABEL: sil private @$s20opaque_values_silgen21s490_______loadBorrowyyF3FooL_V3foo3pos7ElementQzSg5IndexQz_tF : $@convention(method) (@in_guaranteed Elements.Index, @inout Foo) -> @out Optional { -// CHECK: bb0([[ARG0:%.*]] : $Elements.Index, [[ARG1:%.*]] : $*Foo): -// CHECK: [[READ:%.*]] = begin_access [read] [unknown] [[ARG1]] : $*Foo -// CHECK: [[LOAD:%.*]] = load [copy] [[READ]] : $*Foo -// CHECK: end_access [[READ]] : $*Foo -// CHECK: [[BORROW_LOAD:%.*]] = begin_borrow [[LOAD]] -// CHECK: [[EXTRACT:%.*]] = struct_extract [[BORROW_LOAD]] : $Foo, #Foo._elements -// CHECK: [[COPYELT:%.*]] = copy_value [[EXTRACT]] : $Elements -// CHECK: [[COPYIDX:%.*]] = copy_value [[ARG0]] : $Elements.Index -// CHECK: [[WT:%.*]] = witness_method $Elements, #Collection.subscript!getter : (Self) -> (Self.Index) -> Self.Element : $@convention(witness_method: Collection) <τ_0_0 where τ_0_0 : Collection> (@in_guaranteed τ_0_0.Index, @in_guaranteed τ_0_0) -> @out τ_0_0.Element -// CHECK: [[RESULT:%.*]] = apply [[WT]]([[COPYIDX]], [[COPYELT]]) : $@convention(witness_method: Collection) <τ_0_0 where τ_0_0 : Collection> (@in_guaranteed τ_0_0.Index, @in_guaranteed τ_0_0) -> 
@out τ_0_0.Element -// CHECK: destroy_value [[COPYELT]] : $Elements -// CHECK: [[ENUM_RESULT:%.*]] = enum $Optional, #Optional.some!enumelt, [[RESULT]] : $Elements.Element -// CHECK: destroy_value [[LOAD]] +// CHECK-LABEL: sil private [ossa] @$s20opaque_values_silgen10loadBorrowyyF4FooPL_V3foo3pos7ElementQzSg5IndexQz_tF : $@convention(method) (@in_guaranteed Elements.Index, @inout FooP) -> @out Optional { +// CHECK: bb0([[ARG0:%.*]] : @guaranteed $Elements.Index, [[ARG1:%.*]] : $*FooP): +// HECK: [[READ:%.*]] = begin_access [read] [unknown] [[ARG1]] : $*FooP +// HECK: [[LOAD:%.*]] = load [copy] [[READ]] : $*FooP +// HECK: end_access [[READ]] : $*FooP +// HECK: [[BORROW_LOAD:%.*]] = begin_borrow [[LOAD]] +// HECK: [[EXTRACT:%.*]] = struct_extract [[BORROW_LOAD]] : $FooP, #FooP._elements +// HECK: [[COPYELT:%.*]] = copy_value [[EXTRACT]] : $Elements +// HECK: [[COPYIDX:%.*]] = copy_value [[ARG0]] : $Elements.Index +// HECK: [[WT:%.*]] = witness_method $Elements, #Collection.subscript!getter : (Self) -> (Self.Index) -> Self.Element : $@convention(witness_method: Collection) <τ_0_0 where τ_0_0 : Collection> (@in_guaranteed τ_0_0.Index, @in_guaranteed τ_0_0) -> @out τ_0_0.Element +// HECK: [[RESULT:%.*]] = apply [[WT]]([[COPYIDX]], [[COPYELT]]) : $@convention(witness_method: Collection) <τ_0_0 where τ_0_0 : Collection> (@in_guaranteed τ_0_0.Index, @in_guaranteed τ_0_0) -> @out τ_0_0.Element +// HECK: destroy_value [[COPYELT]] : $Elements +// HECK: [[ENUM_RESULT:%.*]] = enum $Optional, #Optional.some!enumelt, [[RESULT]] : $Elements.Element +// HECK: destroy_value [[LOAD]] // CHECK-NOT: destroy_value [[ARG0]] : $Elements.Index -// CHECK: return [[ENUM_RESULT]] : $Optional -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s490_______loadBorrowyyF3FooL_V3foo3pos7ElementQzSg5IndexQz_tF' - -func s490_______loadBorrow() { - struct Foo { +// HECK: return [[ENUM_RESULT]] : $Optional +// CHECK-LABEL: } // end sil function 
'$s20opaque_values_silgen10loadBorrowyyF4FooPL_V3foo3pos7ElementQzSg5IndexQz_tF' +func loadBorrow() { + struct FooP { internal let _elements: Elements public mutating func foo(pos: Elements.Index) -> Elements.Element? { return _elements[pos] } } - var foo = Foo(_elements: []) + var foo = FooP(_elements: []) _ = foo.foo(pos: 1) } -protocol ConvertibleToP { - func asP() -> P -} - -// Test visitBindOptionalExpr -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s500_______getAnyHashyAA1P_pSgAA14ConvertibleToP_pSgF : $@convention(thin) (@in_guaranteed Optional) -> @out Optional

{ -// CHECK: bb0(%0 : $Optional): -// CHECK: [[COPY:%.*]] = copy_value [[ARG]] : $Optional -// CHECK: [[DATA:%.*]] = unchecked_enum_data [[COPY]] : $Optional, #Optional.some!enumelt -// CHECK: [[BORROW_DATA:%.*]] = begin_borrow [[DATA]] : $ConvertibleToP -// CHECK: [[VAL:%.*]] = open_existential_value [[BORROW_DATA]] : $ConvertibleToP to $@opened("{{.*}}") ConvertibleToP -// CHECK: [[WT:%.*]] = witness_method $@opened("{{.*}}") ConvertibleToP, #ConvertibleToP.asP : (Self) -> () -> P, [[VAL]] : $@opened("{{.*}}") ConvertibleToP : $@convention(witness_method: ConvertibleToP) <τ_0_0 where τ_0_0 : ConvertibleToP> (@in_guaranteed τ_0_0) -> @out P -// CHECK: [[AS_P:%.*]] = apply [[WT]]<@opened("{{.*}}") ConvertibleToP>([[VAL]]) : $@convention(witness_method: ConvertibleToP) <τ_0_0 where τ_0_0 : ConvertibleToP> (@in_guaranteed τ_0_0) -> @out P -// CHECK: [[ENUM:%.*]] = enum $Optional

, #Optional.some!enumelt, [[AS_P]] : $P -// CHECK: destroy_value [[DATA]] : $ConvertibleToP -// CHECK: br bb{{.*}}([[ENUM]] : $Optional

) -// CHECK: // end sil function '$s20opaque_values_silgen21s500_______getAnyHashyAA1P_pSgAA14ConvertibleToP_pSgF' -func s500_______getAnyHash(_ value: ConvertibleToP?) -> P? { - return value?.asP() -} - -public protocol FooP { - func foo() -> Self -} - -// Test emitting a protocol witness for a method (with @in_guaranteed self) on a dependent generic type. -// --- -// CHECK-LABEL: sil private [transparent] [thunk] @$s20opaque_values_silgen21s510_______OpaqueSelfVyxGAA4FooPA2aEP3fooxyFTW : $@convention(witness_method: FooP) <τ_0_0> (@in_guaranteed s510_______OpaqueSelf<τ_0_0>) -> @out s510_______OpaqueSelf<τ_0_0> { -// CHECK: bb0(%0 : $s510_______OpaqueSelf<τ_0_0>): -// CHECK: [[FN:%.*]] = function_ref @$s20opaque_values_silgen21s510_______OpaqueSelfV3fooACyxGyF : $@convention(method) <τ_0_0> (@in_guaranteed s510_______OpaqueSelf<τ_0_0>) -> @out s510_______OpaqueSelf<τ_0_0> -// CHECK: [[RESULT:%.*]] = apply [[FN]]<τ_0_0>(%0) : $@convention(method) <τ_0_0> (@in_guaranteed s510_______OpaqueSelf<τ_0_0>) -> @out s510_______OpaqueSelf<τ_0_0> -// CHECK: return [[RESULT]] : $s510_______OpaqueSelf<τ_0_0> -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s510_______OpaqueSelfVyxGAA4FooPA2aEP3fooxyFTW' -struct s510_______OpaqueSelf : FooP { - var x: Base - - func foo() -> s510_______OpaqueSelf { - return self - } -} - -// Tests conditional value casts and correspondingly generated reabstraction thunk, with types -// --- -// CHECK-LABEL: sil hidden @$s20opaque_values_silgen21s999_____condTFromAnyyyyp_xtlF : $@convention(thin) (@in_guaranteed Any, @in_guaranteed T) -> () { -// CHECK: bb0([[ARG0:%.*]] : $Any, [[ARG1:%.*]] : $T): -// CHECK: [[COPY__ARG:%.*]] = copy_value [[ARG]] -// CHECK: checked_cast_value_br [[COPY__ARG]] : $Any to $@callee_guaranteed (@in_guaranteed (Int, T)) -> @out (Int, T), bb2, bb1 -// CHECK: bb2([[THUNK_PARAM:%.*]] : $@callee_guaranteed (@in_guaranteed (Int, T)) -> @out (Int, T)): -// CHECK: [[THUNK_REF:%.*]] = function_ref @{{.*}} : 
$@convention(thin) <τ_0_0> (Int, @in_guaranteed τ_0_0, @guaranteed @callee_guaranteed (@in_guaranteed (Int, τ_0_0)) -> @out (Int, τ_0_0)) -> (Int, @out τ_0_0) -// CHECK: partial_apply [callee_guaranteed] [[THUNK_REF]]([[THUNK_PARAM]]) -// CHECK: bb6: -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen21s999_____condTFromAnyyyyp_xtlF' -func s999_____condTFromAny(_ x: Any, _ y: T) { - if let f = x as? (Int, T) -> (Int, T) { - _ = f(42, y) - } -} - -// Make sure that we insert a destroy of the box even though we used an Int type. -// CHECK-LABEL: sil @$s20opaque_values_silgen22s020_______assignToVaryyF : $@convention(thin) () -> () { -// CHECK: bb0: -// CHECK: [[Y_BOX:%.*]] = alloc_box ${ var Int }, var, name "y" -// CHECK: [[PROJECT_Y_BOX:%.*]] = project_box [[Y_BOX]] : ${ var Int }, 0 -// CHECK: [[X_BOX:%.*]] = alloc_box ${ var Any }, var, name "x" -// CHECK: [[PROJECT_X_BOX:%.*]] = project_box [[X_BOX]] : ${ var Any }, 0 -// CHECK: [[ACCESS_PROJECT_Y_BOX:%.*]] = begin_access [read] [unknown] [[PROJECT_Y_BOX]] : $*Int -// CHECK: [[Y:%.*]] = load [trivial] [[ACCESS_PROJECT_Y_BOX]] : $*Int -// CHECK: [[Y_ANY_FOR_X:%.*]] = init_existential_value [[Y]] : $Int, $Int, $Any -// CHECK: store [[Y_ANY_FOR_X]] to [init] [[PROJECT_X_BOX]] -// CHECK: [[ACCESS_PROJECT_Y_BOX:%.*]] = begin_access [read] [unknown] [[PROJECT_Y_BOX]] : $*Int -// CHECK: [[Y:%.*]] = load [trivial] [[ACCESS_PROJECT_Y_BOX]] : $*Int -// CHECK: [[Y_ANY_FOR_Z:%.*]] = init_existential_value [[Y]] : $Int, $Int, $Any -// CHECK: destroy_value [[Y_ANY_FOR_Z]] -// CEHCK: destroy_value [[X_BOX]] -// CHECK: destroy_value [[Y_BOX]] -// CHECK: } // end sil function '$s20opaque_values_silgen22s020_______assignToVaryyF' -public func s020_______assignToVar() { - var y: Int = 3 - var x: Any = y - let z: Any = y -} - -// s250_________testBoxT continued Test Implicit Value Construction under Opaque value mode -// --- -// CHECK-LABEL: sil hidden 
@$s20opaque_values_silgen3BoxV1tACyxGx_tcfC : $@convention(method) (@in T, @thin Box.Type) -> @out Box { -// CHECK: bb0([[ARG0:%.*]] : $T, [[ARG1:%.*]] : $@thin Box.Type): -// CHECK: [[RETVAL:%.*]] = struct $Box ([[ARG0]] : $T) -// CHECK: return [[RETVAL]] : $Box -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen3BoxV1tACyxGx_tcfC' - -// s270_convOptAnyStruct continued Test: reabstraction thunk helper -// --- -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @$s20opaque_values_silgen9AnyStructVSgACIegnr_A2DIegnr_TR : $@convention(thin) (@in_guaranteed Optional, @guaranteed @callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct) -> @out Optional { -// CHECK: bb0([[ARG0:%.*]] : $Optional, [[ARG1:%.*]] : $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct): -// CHECK: [[APPLYARG:%.*]] = apply [[ARG1]]([[ARG0]]) : $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct -// CHECK: [[RETVAL:%.*]] = enum $Optional, #Optional.some!enumelt, [[APPLYARG]] : $AnyStruct -// CHECK: return [[RETVAL]] : $Optional -// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen9AnyStructVSgACIegnr_A2DIegnr_TR' - -// s300__convETupleToAny continued Test: reabstraction of () to Any -// --- -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @$sIeg_ypIegr_TR : $@convention(thin) (@guaranteed @callee_guaranteed () -> ()) -> @out Any { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed () -> ()): -// CHECK: [[ASTACK:%.*]] = alloc_stack $Any -// CHECK: [[IADDR:%.*]] = init_existential_addr [[ASTACK]] : $*Any, $() -// CHECK: [[APPLYARG:%.*]] = apply [[ARG]]() : $@callee_guaranteed () -> () -// CHECK: [[LOAD_EXIST:%.*]] = load [trivial] [[IADDR]] : $*() -// CHECK: [[RETVAL:%.*]] = init_existential_value [[LOAD_EXIST]] : $(), $(), $Any -// CHECK: return [[RETVAL]] : $Any -// CHECK-LABEL: } // end sil function '$sIeg_ypIegr_TR' - -// s310_convIntTupleAny continued Test: reabstraction of non-empty 
tuple to Any -// --- -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @$sS2iIegdd_ypIegr_TR : $@convention(thin) (@guaranteed @callee_guaranteed () -> (Int, Int)) -> @out Any { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed () -> (Int, Int)): -// CHECK: [[ASTACK:%.*]] = alloc_stack $Any -// CHECK: [[IADDR:%.*]] = init_existential_addr [[ASTACK]] : $*Any, $(Int, Int) -// CHECK: [[TADDR0:%.*]] = tuple_element_addr [[IADDR]] : $*(Int, Int), 0 -// CHECK: [[TADDR1:%.*]] = tuple_element_addr [[IADDR]] : $*(Int, Int), 1 -// CHECK: [[APPLYARG:%.*]] = apply [[ARG]]() : $@callee_guaranteed () -> (Int, Int) -// CHECK: [[TEXTRACT0:%.*]] = tuple_extract [[APPLYARG]] : $(Int, Int), 0 -// CHECK: [[TEXTRACT1:%.*]] = tuple_extract [[APPLYARG]] : $(Int, Int), 1 -// CHECK: store [[TEXTRACT0]] to [trivial] [[TADDR0]] : $*Int -// CHECK: store [[TEXTRACT1]] to [trivial] [[TADDR1]] : $*Int -// CHECK: [[LOAD_EXIST:%.*]] = load [trivial] [[IADDR]] : $*(Int, Int) -// CHECK: [[RETVAL:%.*]] = init_existential_value [[LOAD_EXIST]] : $(Int, Int), $(Int, Int), $Any -// CHECK: dealloc_stack [[ASTACK]] : $*Any -// CHECK: return [[RETVAL]] : $Any -// CHECK-LABEL: } // end sil function '$sS2iIegdd_ypIegr_TR' - - -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @{{.*}} : $@convention(thin) (Int, Int, Int, Int, Int, @guaranteed @callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int)) -> (Int, Int, Int, Int, Int) -// CHECK: bb0([[ARG0:%.*]] : $Int, [[ARG1:%.*]] : $Int, [[ARG2:%.*]] : $Int, [[ARG3:%.*]] : $Int, [[ARG4:%.*]] : $Int, [[ARG5:%.*]] : $@callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int)): -// CHECK: [[TUPLE_TO_APPLY0:%.*]] = tuple ([[ARG2]] : $Int, [[ARG3]] : $Int) -// CHECK: [[TUPLE_TO_APPLY1:%.*]] = tuple ([[ARG1]] : $Int, [[TUPLE_TO_APPLY0]] : $(Int, Int)) -// CHECK: [[TUPLE_TO_APPLY2:%.*]] = tuple ([[ARG0]] : $Int, 
[[TUPLE_TO_APPLY1]] : $(Int, (Int, Int)), [[ARG4]] : $Int) -// CHECK: [[TUPLE_APPLY:%.*]] = apply [[ARG5]]([[TUPLE_TO_APPLY2]]) : $@callee_guaranteed (@in_guaranteed (Int, (Int, (Int, Int)), Int)) -> @out (Int, (Int, (Int, Int)), Int) -// CHECK: [[RET_VAL0:%.*]] = tuple_extract [[TUPLE_APPLY]] : $(Int, (Int, (Int, Int)), Int), 0 -// CHECK: [[TUPLE_EXTRACT1:%.*]] = tuple_extract [[TUPLE_APPLY]] : $(Int, (Int, (Int, Int)), Int), 1 -// CHECK: [[RET_VAL1:%.*]] = tuple_extract [[TUPLE_EXTRACT1]] : $(Int, (Int, Int)), 0 -// CHECK: [[TUPLE_EXTRACT2:%.*]] = tuple_extract [[TUPLE_EXTRACT1]] : $(Int, (Int, Int)), 1 -// CHECK: [[RET_VAL2:%.*]] = tuple_extract [[TUPLE_EXTRACT2]] : $(Int, Int), 0 -// CHECK: [[RET_VAL3:%.*]] = tuple_extract [[TUPLE_EXTRACT2]] : $(Int, Int), 1 -// CHECK: [[RET_VAL4:%.*]] = tuple_extract [[TUPLE_APPLY]] : $(Int, (Int, (Int, Int)), Int), 2 -// CHECK: [[RET_VAL_TUPLE:%.*]] = tuple ([[RET_VAL0]] : $Int, [[RET_VAL1]] : $Int, [[RET_VAL2]] : $Int, [[RET_VAL3]] : $Int, [[RET_VAL4]] : $Int) -// CHECK: return [[RET_VAL_TUPLE]] : $(Int, Int, Int, Int, Int) -// CHECK-LABEL: } // end sil function '{{.*}}' - -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @{{.*}} : $@convention(thin) (Int, @in_guaranteed T, @guaranteed @callee_guaranteed (@in_guaranteed (Int, T)) -> @out (Int, T)) -> (Int, @out T) { -// CHECK: bb0([[ARG0:%.*]] : $Int, [[ARG1:%.*]] : $T, [[ARG2:%.*]] : $@callee_guaranteed (@in_guaranteed (Int, T)) -> @out (Int, T)): -// CHECK: [[TUPLE_TO_APPLY:%.*]] = tuple ([[ARG0]] : $Int, [[ARG1]] : $T) -// CHECK: [[TUPLE_APPLY:%.*]] = apply [[ARG2]]([[TUPLE_TO_APPLY]]) : $@callee_guaranteed (@in_guaranteed (Int, T)) -> @out (Int, T) -// CHECK: [[TUPLE_BORROW:%.*]] = begin_borrow [[TUPLE_APPLY]] : $(Int, T) -// CHECK: [[RET_VAL0:%.*]] = tuple_extract [[TUPLE_BORROW]] : $(Int, T), 0 -// CHECK: [[TUPLE_EXTRACT:%.*]] = tuple_extract [[TUPLE_BORROW]] : $(Int, T), 1 -// CHECK: [[RET_VAL1:%.*]] = copy_value [[TUPLE_EXTRACT]] : $T -// 
CHECK: end_borrow [[TUPLE_BORROW]] : $(Int, T) -// CHECK: destroy_value [[TUPLE_APPLY]] : $(Int, T) -// CHECK: [[RET_VAL_TUPLE:%.*]] = tuple ([[RET_VAL0]] : $Int, [[RET_VAL1]] : $T) -// CHECK: return [[RET_VAL_TUPLE]] : $(Int, T) -// CHECK-LABEL: } // end sil function '{{.*}}' - // Tests LogicalPathComponent's writeback for opaque value types // --- -// CHECK-LABEL: sil @$sSD20opaque_values_silgenE22inoutAccessOfSubscript3keyyq__tF : $@convention(method) (@in_guaranteed Value, @inout Dictionary) -> () { -// CHECK: bb0([[ARG0:%.*]] : $Value, [[ARG1:%.*]] : $*Dictionary): -// CHECK: [[WRITE:%.*]] = begin_access [modify] [unknown] [[ARG1]] : $*Dictionary -// CHECK: [[OPTIONAL_ALLOC:%.*]] = alloc_stack $Optional -// CHECK: switch_enum_addr [[OPTIONAL_ALLOC]] : $*Optional, case #Optional.some!enumelt: bb2, case #Optional.none!enumelt: bb1 -// CHECK: bb2: -// CHECK: [[OPTIONAL_LOAD:%.*]] = load [take] [[OPTIONAL_ALLOC]] : $*Optional -// CHECK: apply {{.*}}([[OPTIONAL_LOAD]], {{.*}}, [[WRITE]]) : $@convention(method) <τ_0_0, τ_0_1 where τ_0_0 : Hashable> (@in Optional<τ_0_1>, @in τ_0_1, @inout Dictionary<τ_0_0, τ_0_1>) -> () -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$sSD20opaque_values_silgenE22inoutAccessOfSubscript3keyyq__tF' +// Dictionary.subscript.getter +// CHECK-LABEL: sil [always_inline] [ossa] @$sSD20opaque_values_silgenEyq_Sgq_cig : $@convention(method) (@in_guaranteed Value, @guaranteed Dictionary) -> @out Optional { +// HECK: bb0([[ARG0:%.*]] : $Value, [[ARG1:%.*]] : $*Dictionary): +// HECK: [[WRITE:%.*]] = begin_access [modify] [unknown] [[ARG1]] : $*Dictionary +// HECK: [[OPTIONAL_ALLOC:%.*]] = alloc_stack $Optional +// HECK: switch_enum_addr [[OPTIONAL_ALLOC]] : $*Optional, case #Optional.some!enumelt: bb2, case #Optional.none!enumelt: bb1 +// HECK: bb2: +// HECK: [[OPTIONAL_LOAD:%.*]] = load [take] [[OPTIONAL_ALLOC]] : $*Optional +// HECK: apply {{.*}}([[OPTIONAL_LOAD]], {{.*}}, [[WRITE]]) : $@convention(method) <τ_0_0, τ_0_1 
where τ_0_0 : Hashable> (@in Optional<τ_0_1>, @in τ_0_1, @inout Dictionary<τ_0_0, τ_0_1>) -> () +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$sSD20opaque_values_silgenEyq_Sgq_cig' // Tests materializeForSet's createSetterCallback for opaque values // --- -// CHECK-LABEL: sil shared [transparent] [serialized] @$sSD20opaque_values_silgenEyq_Sgq_cimytfU_ : $@convention(method) (Builtin.RawPointer, @inout Builtin.UnsafeValueBuffer, @inout Dictionary, @thick Dictionary.Type) -> () { -// CHECK: bb0([[ARG0:%.*]] : $Builtin.RawPointer, [[ARG1:%.*]] : $*Builtin.UnsafeValueBuffer, [[ARG2:%.*]] : $*Dictionary, [[ARG3:%.*]] : $@thick Dictionary.Type): -// CHECK: [[PROJ_VAL1:%.*]] = project_value_buffer $Value in [[ARG1]] : $*Builtin.UnsafeValueBuffer -// CHECK: [[LOAD_VAL1:%.*]] = load [take] [[PROJ_VAL1]] : $*Value -// CHECK: [[ADDR_VAL0:%.*]] = pointer_to_address [[ARG0]] : $Builtin.RawPointer to [strict] $*Optional -// CHECK: [[LOAD_VAL0:%.*]] = load [take] [[ADDR_VAL0]] : $*Optional -// CHECK: apply {{.*}}([[LOAD_VAL0]], [[LOAD_VAL1]], [[ARG2]]) : $@convention(method) <τ_0_0, τ_0_1 where τ_0_0 : Hashable> (@in Optional<τ_0_1>, @in τ_0_1, @inout Dictionary<τ_0_0, τ_0_1>) -> () -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$sSD20opaque_values_silgenEyq_Sgq_cimytfU_' +// Dictionary.subscript.setter +// CHECK-LABEL: sil [ossa] @$sSD20opaque_values_silgenEyq_Sgq_cis : $@convention(method) (@in Optional, @in Value, @inout Dictionary) -> () { +// HECK: bb0([[ARG0:%.*]] : $Builtin.RawPointer, [[ARG1:%.*]] : $*Builtin.UnsafeValueBuffer, [[ARG2:%.*]] : $*Dictionary, [[ARG3:%.*]] : $@thick Dictionary.Type): +// HECK: [[PROJ_VAL1:%.*]] = project_value_buffer $Value in [[ARG1]] : $*Builtin.UnsafeValueBuffer +// HECK: [[LOAD_VAL1:%.*]] = load [take] [[PROJ_VAL1]] : $*Value +// HECK: [[ADDR_VAL0:%.*]] = pointer_to_address [[ARG0]] : $Builtin.RawPointer to [strict] $*Optional +// HECK: [[LOAD_VAL0:%.*]] = load [take] [[ADDR_VAL0]] : 
$*Optional +// HECK: apply {{.*}}([[LOAD_VAL0]], [[LOAD_VAL1]], [[ARG2]]) : $@convention(method) <τ_0_0, τ_0_1 where τ_0_0 : Hashable> (@in Optional<τ_0_1>, @in τ_0_1, @inout Dictionary<τ_0_0, τ_0_1>) -> () +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$sSD20opaque_values_silgenEyq_Sgq_cis' extension Dictionary { public subscript(key: Value) -> Value? { @inline(__always) @@ -1199,37 +248,105 @@ extension Dictionary { } } -// s400______maybeCloneP continued Test: reabstraction thunk +// Test ownership of multi-case Enum values in the context of to @in thunks. // --- -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @$sxSgIegr_20opaque_values_silgen8Clonable_pSgIegr_AbCRzlTR : $@convention(thin) <τ_0_0 where τ_0_0 : Clonable> (@guaranteed @callee_guaranteed () -> @out Optional<τ_0_0>) -> @out Optional { -// CHECK: bb0([[ARG:%.*]] : $@callee_guaranteed () -> @out Optional<τ_0_0>): -// CHECK: [[APPLY_ARG:%.*]] = apply [[ARG]]() : $@callee_guaranteed () -> @out Optional<τ_0_0> -// CHECK: switch_enum [[APPLY_ARG]] : $Optional<τ_0_0>, case #Optional.some!enumelt: bb2, case #Optional.none!enumelt: bb1 -// CHECK: bb1: -// CHECK: [[ONONE:%.*]] = enum $Optional, #Optional.none!enumelt -// CHECK: br bb3([[ONONE]] : $Optional) -// CHECK: bb2([[ENUM_SOME:%.*]] : $τ_0_0): -// CHECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[ENUM_SOME]] : $τ_0_0, $τ_0_0, $Clonable -// CHECK: [[OSOME:%.*]] = enum $Optional, #Optional.some!enumelt, [[INIT_OPAQUE]] : $Clonable -// CHECK: br bb3([[OSOME]] : $Optional) -// CHECK: bb3([[RETVAL:%.*]] : $Optional): -// CHECK: return [[RETVAL]] : $Optional -// CHECK-LABEL: } // end sil function '$sxSgIegr_20opaque_values_silgen8Clonable_pSgIegr_AbCRzlTR' +// protocol witness for static Swift.Equatable.== infix(A, A) -> Swift.Bool in conformance Swift.FloatingPointSign : Swift.Equatable +// CHECK-LABEL: sil shared [transparent] [serialized] [thunk] [ossa] 
@$s20opaque_values_silgen17FloatingPointSignOSQAASQ2eeoiySbx_xtFZTW : $@convention(witness_method: Equatable) (@in_guaranteed FloatingPointSign, @in_guaranteed FloatingPointSign, @thick FloatingPointSign.Type) -> Bool { +// HECK: bb0(%0 : $FloatingPointSign, %1 : $FloatingPointSign, %2 : $@thick FloatingPointSign.Type): +// HECK: %3 = metatype $@thin FloatingPointSign.Type // user: %5 +// HECK: %4 = function_ref @$ss17FloatingPointSignO21__derived_enum_equalsySbAB_ABtFZ : $@convention(method) (FloatingPointSign, FloatingPointSign, @thin FloatingPointSign.Type) -> Bool // user: %5 +// HECK: %5 = apply %4(%0, %1, %3) : $@convention(method) (FloatingPointSign, FloatingPointSign, @thin FloatingPointSign.Type) -> Bool // user: %6 +// HECK: return %5 : $Bool +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen17FloatingPointSignOSQAASQ2eeoiySbx_xtFZTW' +public enum FloatingPointSign { + /// The sign for a positive value. + case plus + + /// The sign for a negative value. + case minus +} + +#if os(macOS) +// Test open_existential_value used in a conversion context. +// +// TODO: Subsequent OSSA optimization should optimize away one of both of these copies. 
+// --- +// CHECK-OSX-LABEL: sil [ossa] @$s20opaque_values_silgen25unsafeDowncastToAnyObject04fromG0yXlyp_tF : $@convention(thin) (@in_guaranteed Any) -> @owned AnyObject { +// CHECK-OSX: bb0(%0 : @guaranteed $Any): +// CHECK-OSX: [[COPY:%.*]] = copy_value %0 : $Any +// CHECK-OSX: [[BORROW2:%.*]] = begin_borrow [[COPY]] : $Any +// CHECK-OSX: [[VAL:%.*]] = open_existential_value [[BORROW2]] : $Any to $@opened +// CHECK-OSX: [[COPY2:%.*]] = copy_value [[VAL]] : $@opened +// CHECK-OSX: end_borrow [[BORROW2]] : $Any +// CHECK-OSX: [[RESULT:%.*]] = apply %{{.*}}<@opened("{{.*}}") Any>([[COPY2]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> @owned AnyObject +// CHECK-OSX: destroy_value [[COPY2]] : $@opened +// CHECK-OSX: destroy_value [[COPY]] : $Any +// CHECK-OSX-NOT: destroy_value %0 : $Any +// CHECK-OSX: return [[RESULT]] : $AnyObject +// CHECK-OSX-LABEL: } // end sil function '$s20opaque_values_silgen25unsafeDowncastToAnyObject04fromG0yXlyp_tF' +public func unsafeDowncastToAnyObject(fromAny any: Any) -> AnyObject { + return any as AnyObject +} +#endif + +#if os(macOS) +// Test open_existential_box_value in a conversion context. +// --- +// CHECK-OSX-LABEL: sil [ossa] @$s20opaque_values_silgen22testOpenExistentialBox1eys5Error_pSg_tF : $@convention(thin) (@guaranteed Optional) -> () { +// CHECK-OSX: [[BORROW:%.*]] = begin_borrow [lexical] %{{.*}} : $Error +// CHECK-OSX: [[VAL:%.*]] = open_existential_box_value [[BORROW]] : $Error to $@opened +// CHECK-OSX: [[COPY:%.*]] = copy_value [[VAL]] : $@opened +// CHECK-OSX: [[ANY:%.*]] = init_existential_value [[COPY]] : $@opened +// CHECK-OSX: end_borrow [[BORROW]] : $Error +// CHECK-OSX-LABEL: } // end sil function '$s20opaque_values_silgen22testOpenExistentialBox1eys5Error_pSg_tF' +public func testOpenExistentialBox(e: Error?) { + if let u = e { + let a: Any = u + _ = a + } +} +#endif -// s320__transImplodeAny continued Test: reabstraction thunk +// Test passing a +1 RValue to @in_guaranteed. 
// --- -// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] @$sypIegn_S2iIegyy_TR : $@convention(thin) (Int, Int, @guaranteed @callee_guaranteed (@in_guaranteed Any) -> ()) -> () { -// CHECK: bb0([[ARG0:%.*]] : $Int, [[ARG1:%.*]] : $Int, [[ARG2:%.*]] : $@callee_guaranteed (@in_guaranteed Any) -> ()): -// CHECK: [[ASTACK:%.*]] = alloc_stack $Any -// CHECK: [[IADDR:%.*]] = init_existential_addr [[ASTACK]] : $*Any, $(Int, Int) -// CHECK: [[TADDR0:%.*]] = tuple_element_addr [[IADDR]] : $*(Int, Int), 0 -// CHECK: store [[ARG0]] to [trivial] [[TADDR0]] : $*Int -// CHECK: [[TADDR1:%.*]] = tuple_element_addr [[IADDR]] : $*(Int, Int), 1 -// CHECK: store [[ARG1]] to [trivial] [[TADDR1]] : $*Int -// CHECK: [[LOAD_EXIST:%.*]] = load [trivial] [[IADDR]] : $*(Int, Int) -// CHECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[LOAD_EXIST]] : $(Int, Int), $(Int, Int), $Any -// CHECK: [[BORROWED_INIT_OPAQUE:%.*]] = begin_borrow [[INIT_OPAQUE]] -// CHECK: [[APPLYARG:%.*]] = apply [[ARG2]]([[BORROWED_INIT_OPAQUE]]) : $@callee_guaranteed (@in_guaranteed Any) -> () -// CHECK: dealloc_stack [[ASTACK]] : $*Any -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$sypIegn_S2iIegyy_TR' +public protocol IP {} + +public protocol Seq { + associatedtype Iterator : IP + + func makeIterator() -> Iterator +} + +extension Seq where Self.Iterator == Self { + public func makeIterator() -> Self { + return self + } +} + +public struct EnumIter : IP, Seq { + internal var _base: Base + + public typealias Iterator = EnumIter +} + +// CHECK-LABEL: sil [ossa] @$s20opaque_values_silgen7EnumSeqV12makeIteratorAA0D4IterVy0G0QzGyF : $@convention(method) (@in_guaranteed EnumSeq) -> @out EnumIter { +// HECK: bb0(%0 : @guaranteed $EnumSeq): +// HECK: [[MT:%.*]] = metatype $@thin EnumIter.Type +// HECK: [[FIELD:%.*]] = struct_extract %0 : $EnumSeq, #EnumSeq._base +// HECK: [[COPY:%.*]] = copy_value [[FIELD]] : $Base +// HECK: [[WT:%.*]] = witness_method $Base, 
#Seq.makeIterator : (Self) -> () -> Self.Iterator : $@convention(witness_method: Seq) <τ_0_0 where τ_0_0 : Seq> (@in_guaranteed τ_0_0) -> @out τ_0_0.Iterator +// HECK: [[ITER:%.*]] = apply [[WT]]([[COPY]]) : $@convention(witness_method: Seq) <τ_0_0 where τ_0_0 : Seq> (@in_guaranteed τ_0_0) -> @out τ_0_0.Iterator +// HECK: destroy_value [[COPY]] : $Base +// HECK: [[FN:%.*]] = function_ref @$ss8EnumIterV5_baseAByxGx_tcfC : $@convention(method) <τ_0_0 where τ_0_0 : IP> (@in τ_0_0, @thin EnumIter<τ_0_0>.Type) -> @out EnumIter<τ_0_0> +// HECK: [[RET:%.*]] = apply [[FN]]([[ITER]], [[MT]]) : $@convention(method) <τ_0_0 where τ_0_0 : IP> (@in τ_0_0, @thin EnumIter<τ_0_0>.Type) -> @out EnumIter<τ_0_0> +// HECK: return [[RET]] : $EnumIter +// CHECK-LABEL: } // end sil function '$s20opaque_values_silgen7EnumSeqV12makeIteratorAA0D4IterVy0G0QzGyF' +public struct EnumSeq : Seq { + public typealias Iterator = EnumIter + + internal var _base: Base + + public func makeIterator() -> Iterator { + return EnumIter(_base: _base.makeIterator()) + } +} diff --git a/test/SILGen/opaque_values_silgen_lib.swift b/test/SILGen/opaque_values_silgen_lib.swift index f72300ceaa4c3..22e9aa97f6b94 100644 --- a/test/SILGen/opaque_values_silgen_lib.swift +++ b/test/SILGen/opaque_values_silgen_lib.swift @@ -1,72 +1,1080 @@ +// RUN: %target-swift-emit-silgen -enable-sil-opaque-values -Xllvm -sil-full-demangle -parse-stdlib -parse-as-library -module-name Swift %s | %FileCheck %s --check-prefix=CHECK -// RUN: %target-swift-emit-silgen -enable-sil-opaque-values -emit-sorted-sil -Xllvm -sil-full-demangle -parse-stdlib -parse-as-library -module-name Swift %s | %FileCheck %s +// Test SILGen -enable-sil-opaque-values -precedencegroup AssignmentPrecedence { assignment: true } +typealias AnyObject = Builtin.AnyObject -enum Optional { +public enum Optional { case none - case some(Wrapped) + case some(T) +} + +public protocol ExpressibleByNilLiteral { + init(nilLiteral: ()) +} + +extension Optional : 
ExpressibleByNilLiteral { + public init(nilLiteral: ()) { + self = .none + } +} + +func _diagnoseUnexpectedNilOptional(_filenameStart: Builtin.RawPointer, + _filenameLength: Builtin.Word, + _filenameIsASCII: Builtin.Int1, + _line: Builtin.Word, + _isImplicitUnwrap: Builtin.Int1) { + // This would usually contain an assert, but we don't need one since we are + // just emitting SILGen. } +precedencegroup AssignmentPrecedence { assignment: true } +precedencegroup CastingPrecedence {} +precedencegroup ComparisonPrecedence {} +precedencegroup TernaryPrecedence {} + +public protocol Error {} + +public protocol _ObjectiveCBridgeable {} + protocol EmptyP {} +struct AddressOnlyStruct : EmptyP {} + struct String { var ptr: Builtin.NativeObject } +public typealias _MaxBuiltinIntegerType = Builtin.IntLiteral + +public protocol _ExpressibleByBuiltinIntegerLiteral { + init(_builtinIntegerLiteral value: _MaxBuiltinIntegerType) +} + +public protocol ExpressibleByIntegerLiteral { + associatedtype IntegerLiteralType : _ExpressibleByBuiltinIntegerLiteral + + init(integerLiteral value: IntegerLiteralType) +} + +extension ExpressibleByIntegerLiteral + where Self : _ExpressibleByBuiltinIntegerLiteral { + @_transparent + public init(integerLiteral value: Self) { + self = value + } +} + +public protocol ExpressibleByFloatLiteral {} + +typealias Bool = Builtin.Int1 + +public struct Int64 : ExpressibleByIntegerLiteral, _ExpressibleByBuiltinIntegerLiteral { + public var _value: Builtin.Int64 + public init(_builtinIntegerLiteral x: _MaxBuiltinIntegerType) { + _value = Builtin.s_to_s_checked_trunc_IntLiteral_Int64(x).0 + } + public typealias IntegerLiteralType = Int64 + public init(integerLiteral value: Int64) { + self = value + } +} + +public protocol UnkeyedDecodingContainer { + var isAtEnd: Builtin.Int1 { get } +} + +public protocol Decoder { + func unkeyedContainer() throws -> UnkeyedDecodingContainer +} + +protocol FooP { + func foo() +} + +struct AnyStruct { + let a: Any +} + +protocol P 
{ + var x : Builtin.Int64 { get } +} + +protocol P2 : P {} + +struct TrivialStruct { + var x: Builtin.Int64 +} + +extension TrivialStruct : P2 {} + +protocol Clonable { + func maybeClone() -> Self? +} + +func unreachableF() -> (Builtin.Int64, T)? { /* no body */ } + +protocol ConvertibleToP { + func asP() -> P +} + +indirect enum IndirectEnum { + case Nil + case Node(T) +} + +protocol SubscriptableGet { + subscript(a : Builtin.Int64) -> Builtin.Int64 { get } +} + +protocol SubscriptableGetSet { + subscript(a : Builtin.Int64) -> Builtin.Int64 { get set } +} + +var subscriptableGet : SubscriptableGet? +var subscriptableGetSet : SubscriptableGetSet? + +func genericInout(_: inout T) {} + +// ============================================================================= +// Begin Test Cases +// ============================================================================= + +enum PAndSEnum { case A(EmptyP, String) } + // Tests Empty protocol + Builtin.NativeObject enum (including opaque tuples as a return value) // --- -// CHECK-LABEL: sil hidden [ossa] @$ss21s010______PAndS_casesyyF : $@convention(thin) () -> () { -// CHECK: bb0: -// CHECK: [[MTYPE:%.*]] = metatype $@thin PAndSEnum.Type -// CHECK: [[EAPPLY:%.*]] = apply {{.*}}([[MTYPE]]) : $@convention(thin) (@thin PAndSEnum.Type) -> @owned @callee_guaranteed (@in_guaranteed EmptyP, @guaranteed String) -> @out PAndSEnum -// CHECK: destroy_value [[EAPPLY]] -// CHECK: return %{{.*}} : $() -// CHECK-LABEL: } // end sil function '$ss21s010______PAndS_casesyyF' -func s010______PAndS_cases() { +// Swift.f010_PAndS_cases() -> () +// CHECK-LABEL: sil hidden [ossa] @$ss16f010_PAndS_casesyyF : $@convention(thin) () -> () { +// HECK: bb0: +// HECK: [[MTYPE:%.*]] = metatype $@thin PAndSEnum.Type +// HECK: [[EAPPLY:%.*]] = apply {{.*}}([[MTYPE]]) : $@convention(thin) (@thin PAndSEnum.Type) -> @owned @callee_guaranteed (@in_guaranteed EmptyP, @guaranteed String) -> @out PAndSEnum +// HECK: destroy_value [[EAPPLY]] +// HECK: return 
%{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss16f010_PAndS_casesyyF' +func f010_PAndS_cases() { _ = PAndSEnum.A } // Init of Empty protocol + Builtin.NativeObject enum (including opaque tuples as a return value) // --- -// CHECK-LABEL: sil private [ossa] @$ss21s010______PAndS_casesyyFs0B5SEnumOs6EmptyP_p_SStcACmcfu_ACsAD_p_SStcfu0_ : $@convention(thin) (@in_guaranteed EmptyP, @guaranteed String, @thin PAndSEnum.Type) -> @out PAndSEnum { -// CHECK: bb0([[ARG0:%.*]] : @guaranteed $EmptyP, [[ARG1:%.*]] : @guaranteed $String, [[ARG2:%.*]] : $@thin PAndSEnum.Type): -// CHECK: [[COPY0:%.*]] = copy_value [[ARG0]] -// CHECK: [[COPY1:%.*]] = copy_value [[ARG1]] -// CHECK: [[RTUPLE:%.*]] = tuple ([[COPY0]] : $EmptyP, [[COPY1]] : $String) -// CHECK: [[RETVAL:%.*]] = enum $PAndSEnum, #PAndSEnum.A!enumelt, [[RTUPLE]] : $(EmptyP, String) -// CHECK: return [[RETVAL]] : $PAndSEnum -// CHECK-LABEL: } // end sil function '$ss21s010______PAndS_casesyyFs0B5SEnumOs6EmptyP_p_SStcACmcfu_ACsAD_p_SStcfu0_' -enum PAndSEnum { case A(EmptyP, String) } - +// implicit closure #2 (Swift.EmptyP, Swift.String) -> Swift.PAndSEnum in implicit closure #1 (Swift.PAndSEnum.Type) -> (Swift.EmptyP, Swift.String) -> Swift.PAndSEnum in Swift.f010_PAndS_cases() -> () +// CHECK-LABEL: sil private [ossa] @$ss16f010_PAndS_casesyyFs0B5SEnumOs6EmptyP_p_SStcACmcfu_ACsAD_p_SStcfu0_ : $@convention(thin) (@in_guaranteed EmptyP, @guaranteed String, @thin PAndSEnum.Type) -> @out PAndSEnum { +// HECK: bb0([[ARG0:%.*]] : @guaranteed $EmptyP, [[ARG1:%.*]] : @guaranteed $String, [[ARG2:%.*]] : $@thin PAndSEnum.Type): +// HECK: [[COPY0:%.*]] = copy_value [[ARG0]] +// HECK: [[COPY1:%.*]] = copy_value [[ARG1]] +// HECK: [[RTUPLE:%.*]] = tuple ([[COPY0]] : $EmptyP, [[COPY1]] : $String) +// HECK: [[RETVAL:%.*]] = enum $PAndSEnum, #PAndSEnum.A!enumelt, [[RTUPLE]] : $(EmptyP, String) +// HECK: return [[RETVAL]] : $PAndSEnum +// CHECK-LABEL: } // end sil function 
'$ss16f010_PAndS_casesyyFs0B5SEnumOs6EmptyP_p_SStcACmcfu_ACsAD_p_SStcfu0_' // Test emitBuiltinReinterpretCast. // --- -// CHECK-LABEL: sil hidden [ossa] @$ss21s020__________bitCast_2toq_x_q_mtr0_lF : $@convention(thin) (@in_guaranteed T, @thick U.Type) -> @out U { -// CHECK: bb0([[ARG:%.*]] : @guaranteed $T, -// CHECK: [[COPY:%.*]] = copy_value [[ARG]] : $T -// CHECK: [[CAST:%.*]] = unchecked_bitwise_cast [[COPY]] : $T to $U -// CHECK: [[RET:%.*]] = copy_value [[CAST]] : $U -// CHECK: destroy_value [[COPY]] : $T -// CHECK: return [[RET]] : $U -// CHECK-LABEL: } // end sil function '$ss21s020__________bitCast_2toq_x_q_mtr0_lF' -func s020__________bitCast(_ x: T, to type: U.Type) -> U { +// CHECK-LABEL: sil hidden [ossa] @$ss12f020_bitCast_2toq_x_q_mtr0_lF : $@convention(thin) (@in_guaranteed T, @thick U.Type) -> @out U { +// HECK: bb0([[ARG:%.*]] : @guaranteed $T, +// HECK: [[COPY:%.*]] = copy_value [[ARG]] : $T +// HECK: [[CAST:%.*]] = unchecked_bitwise_cast [[COPY]] : $T to $U +// HECK: [[RET:%.*]] = copy_value [[CAST]] : $U +// HECK: destroy_value [[COPY]] : $T +// HECK: return [[RET]] : $U +// CHECK-LABEL: } // end sil function '$ss12f020_bitCast_2toq_x_q_mtr0_lF' +func f020_bitCast(_ x: T, to type: U.Type) -> U { return Builtin.reinterpretCast(x) } // Test emitBuiltinCastReference // --- -// CHECK-LABEL: sil hidden [ossa] @$ss21s030__________refCast_2toq_x_q_mtr0_lF : $@convention(thin) (@in_guaranteed T, @thick U.Type) -> @out U { +// CHECK-LABEL: sil hidden [ossa] @$ss12f021_refCast_2toq_x_q_mtr0_lF : $@convention(thin) (@in_guaranteed T, @thick U.Type) -> @out U { // CHECK: bb0([[ARG:%.*]] : @guaranteed $T, %1 : $@thick U.Type): -// CHECK: [[COPY:%.*]] = copy_value [[ARG]] : $T -// CHECK: [[SRC:%.*]] = alloc_stack $T -// CHECK: store [[COPY]] to [init] [[SRC]] : $*T -// CHECK: [[DEST:%.*]] = alloc_stack $U -// CHECK: unchecked_ref_cast_addr T in [[SRC]] : $*T to U in [[DEST]] : $*U -// CHECK: [[LOAD:%.*]] = load [take] [[DEST]] : $*U -// CHECK: dealloc_stack 
[[DEST]] : $*U -// CHECK: dealloc_stack [[SRC]] : $*T +// HECK: [[COPY:%.*]] = copy_value [[ARG]] : $T +// HECK: [[SRC:%.*]] = alloc_stack $T +// HECK: store [[COPY]] to [init] [[SRC]] : $*T +// HECK: [[DEST:%.*]] = alloc_stack $U +// HECK: unchecked_ref_cast_addr T in [[SRC]] : $*T to U in [[DEST]] : $*U +// HECK: [[LOAD:%.*]] = load [take] [[DEST]] : $*U +// HECK: dealloc_stack [[DEST]] : $*U +// HECK: dealloc_stack [[SRC]] : $*T // CHECK-NOT: destroy_value [[ARG]] : $T -// CHECK: return [[LOAD]] : $U -// CHECK-LABEL: } // end sil function '$ss21s030__________refCast_2toq_x_q_mtr0_lF' -func s030__________refCast(_ x: T, to: U.Type) -> U { +// HECK: return [[LOAD]] : $U +// CHECK-LABEL: } // end sil function '$ss12f021_refCast_2toq_x_q_mtr0_lF' +func f021_refCast(_ x: T, to: U.Type) -> U { return Builtin.castReference(x) } + +// Test unsafe_bitwise_cast nontrivial ownership. +// --- +// CHECK-LABEL: sil [ossa] @$ss18f022_unsafeBitCast_2toq_x_q_mtr0_lF : $@convention(thin) (@in_guaranteed T, @thick U.Type) -> @out U { +// HECK: bb0([[ARG0:%.*]] : @guaranteed $T, [[ARG1:%.*]] : $@thick U.Type): +// HECK: [[ARG_COPY:%.*]] = copy_value [[ARG0]] : $T +// HECK: [[RESULT:%.*]] = unchecked_bitwise_cast [[ARG_COPY]] : $T to $U +// HECK: [[RESULT_COPY:%.*]] = copy_value [[RESULT]] : $U +// HECK: destroy_value [[ARG_COPY]] : $T +// HECK: return [[RESULT_COPY]] : $U +// CHECK-LABEL: } // end sil function '$ss18f022_unsafeBitCast_2toq_x_q_mtr0_lF' +public func f022_unsafeBitCast(_ x: T, to type: U.Type) -> U { + return Builtin.reinterpretCast(x) +} + +// Test emitSemanticStore. 
+// --- +// CHECK-LABEL: sil hidden [ossa] @$ss16f030_assigninoutyyxz_xtlF : $@convention(thin) (@inout T, @in_guaranteed T) -> () { +// CHECK: bb0([[ARG0:%.*]] : $*T, [[ARG1:%.*]] : @guaranteed $T): +// HECK: [[CPY:%.*]] = copy_value [[ARG1]] : $T +// HECK: [[READ:%.*]] = begin_access [modify] [unknown] [[ARG0]] : $*T +// HECK: assign [[CPY]] to [[READ]] : $*T +// CHECK-NOT: destroy_value [[ARG1]] : $T +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss16f030_assigninoutyyxz_xtlF' +func f030_assigninout(_ a: inout T, _ b: T) { + a = b +} + +// Test that we no longer use copy_addr or tuple_element_addr when copy by value is possible +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss19f040_tupleReturnIntyBi64_Bi64__xt_tlF : $@convention(thin) (Builtin.Int64, @in_guaranteed T) -> Builtin.Int64 { +// HECK: bb0([[ARG0:%.*]] : $Builtin.Int64, [[ARG1:%.*]] : $T): +// HECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]] +// HECK: [[TPL:%.*]] = tuple ([[ARG0]] : $Builtin.Int64, [[ARG1_COPY]] : $T) +// HECK: [[BORROWED_ARG1:%.*]] = begin_borrow [[TPL]] : $(Builtin.Int64, T) +// HECK: [[CPY:%.*]] = copy_value [[BORROWED_ARG1]] : $(Builtin.Int64, T) +// HECK: [[BORROWED_CPY:%.*]] = begin_borrow [[CPY]] +// HECK: [[INT:%.*]] = tuple_extract [[BORROWED_CPY]] : $(Builtin.Int64, T), 0 +// HECK: [[GEN:%.*]] = tuple_extract [[BORROWED_CPY]] : $(Builtin.Int64, T), 1 +// HECK: [[COPY_GEN:%.*]] = copy_value [[GEN]] +// HECK: destroy_value [[COPY_GEN]] +// HECK: end_borrow [[BORROWED_CPY]] +// HECK: destroy_value [[CPY]] +// HECK: end_borrow [[BORROWED_ARG1]] : $(Builtin.Int64, T) +// HECK: destroy_value [[TPL]] : $(Builtin.Int64, T) +// HECK: return [[INT]] +// CHECK-LABEL: } // end sil function '$ss19f040_tupleReturnIntyBi64_Bi64__xt_tlF' +func f040_tupleReturnInt(_ x: (Builtin.Int64, T)) -> Builtin.Int64 { + let y = x.0 + return y +} + +// Test returning an opaque tuple of tuples. 
+// --- +// CHECK-LABEL: sil hidden [noinline] [ossa] @$ss16f050_multiResultyx_x_xttxlF : $@convention(thin) (@in_guaranteed T) -> (@out T, @out T, @out T) { +// HECK: bb0(%0 : $T): +// HECK: %[[CP1:.*]] = copy_value %{{.*}} : $T +// HECK: %[[CP2:.*]] = copy_value %{{.*}} : $T +// HECK: %[[CP3:.*]] = copy_value %{{.*}} : $T +// CHECK-NOT: destroy_value %0 : $T +// HECK: %[[TPL:.*]] = tuple (%[[CP1]] : $T, %[[CP2]] : $T, %[[CP3]] : $T) +// HECK: return %[[TPL]] : $(T, T, T) +// CHECK-LABEL: } // end sil function '$ss16f050_multiResultyx_x_xttxlF' +@inline(never) +func f050_multiResult(_ t: T) -> (T, (T, T)) { + return (t, (t, t)) +} + +// Test returning an opaque tuple of tuples as a concrete tuple. +// --- +// CHECK-LABEL: sil [ossa] @$ss20f060_callMultiResult1iBi64__Bi64__Bi64_ttBi64__tF : $@convention(thin) (Builtin.Int64) -> (Builtin.Int64, Builtin.Int64, Builtin.Int64) { +// HECK: bb0(%0 : $Builtin.Int64): +// HECK: %[[FN:.*]] = function_ref @$s20opaque_values_silgen21f050_multiResultyx_x_xttxlF : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) +// HECK: %[[TPL:.*]] = apply %[[FN]](%0) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> (@out τ_0_0, @out τ_0_0, @out τ_0_0) +// HECK: %[[I1:.*]] = tuple_extract %[[TPL]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64), 0 +// HECK: %[[I2:.*]] = tuple_extract %[[TPL]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64), 1 +// HECK: %[[I3:.*]] = tuple_extract %[[TPL]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64), 2 +// HECK: %[[R:.*]] = tuple (%[[I1]] : $Builtin.Int64, %[[I2]] : $Builtin.Int64, %[[I3]] : $Builtin.Int64) +// HECK: return %[[R]] : $(Builtin.Int64, Builtin.Int64, Builtin.Int64) +// CHECK-LABEL: } // end sil function '$ss20f060_callMultiResult1iBi64__Bi64__Bi64_ttBi64__tF' +public func f060_callMultiResult(i: Builtin.Int64) -> (Builtin.Int64, (Builtin.Int64, Builtin.Int64)) { + return f050_multiResult(i) +} + +// SILGen, prepareArchetypeCallee. 
Materialize a +// non-class-constrainted self from a class-constrained archetype. +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss20f070_materializeSelf1tyx_tRlzCs4FooPRzlF : $@convention(thin) (@guaranteed T) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $T): +// HECK: [[WITNESS_METHOD:%.*]] = witness_method $T, #FooP.foo : (Self) -> () -> () : $@convention(witness_method: FooP) <τ_0_0 where τ_0_0 : FooP> (@in_guaranteed τ_0_0) -> () +// HECK: apply [[WITNESS_METHOD]]([[ARG]]) : $@convention(witness_method: FooP) <τ_0_0 where τ_0_0 : FooP> (@in_guaranteed τ_0_0) -> () +// CHECK-NOT: destroy_value [[ARG]] : $T +// HECK: return %{{[0-9]+}} : $() +// CHECK-LABEL: } // end sil function '$ss20f070_materializeSelf1tyx_tRlzCs4FooPRzlF' +func f070_materializeSelf(t: T) where T: AnyObject { + t.foo() +} + +// Test open existential with opaque values +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss8f080_bar1pBi64_s1P_p_tF : $@convention(thin) (@in_guaranteed P) -> Builtin.Int64 { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $P): +// HECK: [[OPENED_ARG:%.*]] = open_existential_value [[ARG]] : $P to $@opened +// HECK: [[WITNESS_FUNC:%.*]] = witness_method $@opened +// HECK: [[RESULT:%.*]] = apply [[WITNESS_FUNC]]<{{.*}}>([[OPENED_ARG]]) : $@convention(witness_method: P) <τ_0_0 where τ_0_0 : P> (@in_guaranteed τ_0_0) -> Builtin.Int64 +// CHECK-NOT: destroy_value [[ARG]] : $P +// HECK: return [[RESULT]] : $Builtin.Int64 +// CHECK-LABEL: } // end sil function '$ss8f080_bar1pBi64_s1P_p_tF' +func f080_bar(p: P) -> Builtin.Int64 { + return p.x +} + +// Test OpaqueTypeLowering copyValue and destroyValue. 
+// --- +// CHECK-LABEL: sil hidden [ossa] @$ss11f090_calleryxxlF : $@convention(thin) (@in_guaranteed T) -> @out T { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $T): +// CHECK-NOT: copy_value +// HECK: [[RESULT:%.*]] = apply {{%.*}}([[ARG]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> @out τ_0_0 +// CHECK-NOT: destroy_value [[ARG]] : $T +// HECK: return %{{.*}} : $T +// CHECK-LABEL: } // end sil function '$ss11f090_calleryxxlF' +func f090_caller(_ t: T) -> T { + return f090_caller(t) +} + +// Test a simple opaque parameter and return value. +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss13f100_identityyxxlF : $@convention(thin) (@in_guaranteed T) -> @out T { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $T): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] : $T +// CHECK-NOT: destroy_value [[ARG]] : $T +// HECK: return [[COPY_ARG]] : $T +// CHECK-LABEL: } // end sil function '$ss13f100_identityyxxlF' +func f100_identity(_ t: T) -> T { + return t +} + +// Test a guaranteed opaque parameter. 
+// ---
+// CHECK-LABEL: sil private [transparent] [thunk] [ossa] @$ss19f110_GuaranteedSelfVs4FooPssACP3fooyyFTW : $@convention(witness_method: FooP) (@in_guaranteed f110_GuaranteedSelf) -> () {
+// CHECK: bb0(%0 : $f110_GuaranteedSelf):
+// HECK: %[[F:.*]] = function_ref @$s20opaque_values_silgen21f110_GuaranteedSelfV3fooyyF : $@convention(method) (f110_GuaranteedSelf) -> ()
+// HECK: apply %[[F]](%0) : $@convention(method) (f110_GuaranteedSelf) -> ()
+// HECK: return
+// CHECK-LABEL: } // end sil function '$ss19f110_GuaranteedSelfVs4FooPssACP3fooyyFTW'
+struct f110_GuaranteedSelf : FooP {
+  func foo() {}
+}
+
+// Tests a corner case wherein we used to do a temporary and return a pointer to T instead of T
+// ---
+// CHECK-LABEL: sil hidden [ossa] @$ss16f120_returnValueyxxlF : $@convention(thin) <T> (@in_guaranteed T) -> @out T {
+// CHECK: bb0([[ARG:%.*]] : @guaranteed $T):
+// HECK: [[COPY_ARG1:%.*]] = copy_value [[ARG]] : $T
+// HECK: [[BORROWED_ARG2:%.*]] = begin_borrow [[COPY_ARG1]]
+// HECK: [[COPY_ARG2:%.*]] = copy_value [[BORROWED_ARG2]] : $T
+// HECK: end_borrow [[BORROWED_ARG2]]
+// HECK: return [[COPY_ARG2]] : $T
+// CHECK-LABEL: } // end sil function '$ss16f120_returnValueyxxlF'
+func f120_returnValue<T>(_ x: T) -> T {
+  let y = x
+  return y
+}
+
+// Tests Optional initialization by value
+// ---
+// CHECK-LABEL: sil hidden [ossa] @$ss9f130_wrapyxSgxlF : $@convention(thin) <T> (@in_guaranteed T) -> @out Optional<T> {
+// CHECK: bb0([[ARG:%.*]] : @guaranteed $T):
+// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] : $T
+// HECK: [[OPTIONAL_ARG:%.*]] = enum $Optional<T>, #Optional.some!enumelt, [[COPY_ARG]] : $T
+// CHECK-NOT: destroy_value [[ARG]] : $T
+// HECK: return [[OPTIONAL_ARG]] : $Optional<T>
+// CHECK-LABEL: } // end sil function '$ss9f130_wrapyxSgxlF'
+func f130_wrap<T>(_ x: T) -> T? {
+  return x
+}
+
+func f150_anyArg(_: Any) {}
+
+// Tests init of opaque existentials
+// ---
+// CHECK-LABEL: sil hidden [ossa] @$ss15f160_callAnyArgyyF : $@convention(thin) () -> () {
+// CHECK: bb0:
+// HECK: [[INT_TYPE:%.*]] = metatype $@thin Builtin.Int64.Type
+// HECK: [[INT_LIT:%.*]] = integer_literal $Builtin.Builtin.Int64Literal, 42
+// HECK: [[INT_ARG:%.*]] = apply %{{.*}}([[INT_LIT]], [[INT_TYPE]]) : $@convention(method) (Builtin.Builtin.Int64Literal, @thin Builtin.Int64.Type) -> Builtin.Int64
+// HECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[INT_ARG]] : $Builtin.Int64, $Builtin.Int64, $Any
+// HECK: apply %{{.*}}([[INIT_OPAQUE]]) : $@convention(thin) (@in_guaranteed Any) -> ()
+// HECK: return %{{.*}} : $()
+// CHECK-LABEL: } // end sil function '$ss15f160_callAnyArgyyF'
+func f160_callAnyArg() {
+  f150_anyArg(Int64(42))
+}
+
+// Tests unconditional_checked_cast for opaque values
+// ---
+// CHECK-LABEL: sil hidden [ossa] @$ss18f170_force_convertxylF : $@convention(thin) <T> () -> @out T {
+// CHECK: bb0:
+// HECK-NOT: alloc_stack
+// HECK: [[INT_TYPE:%.*]] = metatype $@thin Builtin.Int64.Type
+// HECK: [[INT_LIT:%.*]] = integer_literal $Builtin.Builtin.Int64Literal, 42
+// HECK: [[INT_ARG:%.*]] = apply %{{.*}}([[INT_LIT]], [[INT_TYPE]]) : $@convention(method) (Builtin.Builtin.Int64Literal, @thin Builtin.Int64.Type) -> Builtin.Int64
+// HECK: [[INT_CAST:%.*]] = unconditional_checked_cast_value [[INT_ARG]] : $Builtin.Int64 to $T
+// HECK: [[CAST_BORROW:%.*]] = begin_borrow [[INT_CAST]] : $T
+// HECK: [[RETURN_VAL:%.*]] = copy_value [[CAST_BORROW]] : $T
+// HECK: end_borrow [[CAST_BORROW]] : $T
+// HECK: destroy_value [[INT_CAST]] : $T
+// HECK: return [[RETURN_VAL]] : $T
+// CHECK-LABEL: } // end sil function '$ss18f170_force_convertxylF'
+func f170_force_convert<T>() -> T {
+  let x : T = Int64(42) as! 
T + return x +} + +// Tests supporting function for f190_return_foo_var - cast and return of protocol +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss15f180_return_foos4FooP_pyF : $@convention(thin) () -> @out FooP { +// CHECK: bb0: +// HECK: [[INT_LIT:%.*]] = integer_literal $Builtin.Builtin.Int64Literal, 42 +// HECK: [[INT_ARG:%.*]] = apply %{{.*}}([[INT_LIT]], [[INT_TYPE]]) : $@convention(method) (Builtin.Builtin.Int64Literal, @thin Builtin.Int64.Type) -> Builtin.Int64 +// HECK: [[INT_CAST:%.*]] = unconditional_checked_cast_value [[INT_ARG]] : $Builtin.Int64 to $FooP +// HECK: return [[INT_CAST]] : $FooP +// CHECK-LABEL: } // end sil function '$ss15f180_return_foos4FooP_pyF' +func f180_return_foo() -> FooP { + return Int64(42) as! FooP +} +var foo_var : FooP = f180_return_foo() + +// Tests return of global variables by doing a load of copy +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss19f190_return_foo_vars4FooP_pyF : $@convention(thin) () -> @out FooP { +// CHECK: bb0: +// HECK: [[GLOBAL:%.*]] = global_addr {{.*}} : $*FooP +// HECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL]] : $*FooP +// HECK: [[LOAD_GLOBAL:%.*]] = load [copy] [[READ]] : $*FooP +// HECK: return [[LOAD_GLOBAL]] : $FooP +// CHECK-LABEL: } // end sil function '$ss19f190_return_foo_vars4FooP_pyF' +func f190_return_foo_var() -> FooP { + return foo_var +} + +// Tests deinit of opaque existentials +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss16f200_use_foo_varyyF : $@convention(thin) () -> () { +// CHECK: bb0: +// HECK: [[GLOBAL:%.*]] = global_addr {{.*}} : $*FooP +// HECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL]] : $*FooP +// HECK: [[LOAD_GLOBAL:%.*]] = load [copy] [[READ]] : $*FooP +// HECK: [[BORROW:%.*]] = begin_borrow [[LOAD_GLOBAL]] : $FooP +// HECK: [[OPEN_VAR:%.*]] = open_existential_value [[BORROW]] : $FooP +// HECK: [[WITNESS:%.*]] = witness_method $@opened +// HECK: apply [[WITNESS]] +// HECK: end_borrow [[BORROW]] +// HECK: destroy_value [[LOAD_GLOBAL]] +// 
HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss16f200_use_foo_varyyF' +func f200_use_foo_var() { + foo_var.foo() +} + + +// Tests composition erasure of opaque existentials + copy into of opaques +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss16f210_compErasureys5Error_psAB_s4FooPpF : $@convention(thin) (@in_guaranteed Error & FooP) -> @owned Error { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Error & FooP): +// HECK: [[OPAQUE_ARG:%.*]] = open_existential_value [[ARG]] : $Error & FooP to $@opened({{.*}}) Error & FooP +// HECK: [[EXIST_BOX:%.*]] = alloc_existential_box $Error, $@opened({{.*}}) Error & FooP +// HECK: [[PROJ_BOX:%.*]] = project_existential_box $@opened({{.*}}) Error & FooP in [[EXIST_BOX]] +// HECK: [[COPY_OPAQUE:%.*]] = copy_value [[OPAQUE_ARG]] : $@opened({{.*}}) Error & FooP +// HECK: store [[COPY_OPAQUE]] to [init] [[PROJ_BOX]] : $*@opened({{.*}}) Error & FooP +// CHECK-NOT: destroy_value [[ARG]] : $Error & FooP +// HECK: return [[EXIST_BOX]] : $Error +// CHECK-LABEL: } // end sil function '$ss16f210_compErasureys5Error_psAB_s4FooPpF' +func f210_compErasure(_ x: FooP & Error) -> Error { + return x +} + +// Tests Implicit Value Construction under Opaque value mode +// --- + +// f250_testBoxT continued Test Implicit Value Construction under Opaque value mode +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss3BoxV1tAByxGx_tcfC : $@convention(method) (@in T, @thin Box.Type) -> @out Box { +// CHECK: bb0([[ARG0:%.*]] : @owned $T, [[ARG1:%.*]] : $@thin Box.Type): +// HECK: [[RETVAL:%.*]] = struct $Box ([[ARG0]] : $T) +// HECK: return [[RETVAL]] : $Box +// CHECK-LABEL: } // end sil function '$ss3BoxV1tAByxGx_tcfC' +struct Box { + let t: T +} + +// CHECK-LABEL: sil hidden [ossa] @$ss13f250_testBoxTyyF : $@convention(thin) () -> () { +// CHECK: bb0: +// HECK: [[BOX_MTYPE:%.*]] = metatype $@thin Box.Type +// HECK: [[MTYPE:%.*]] = metatype $@thin Builtin.Int64.Type +// HECK: [[INTLIT:%.*]] = integer_literal $Builtin.Builtin.Int64Literal, 42 
+// HECK: [[AINT:%.*]] = apply {{.*}}([[INTLIT]], [[MTYPE]]) : $@convention(method) (Builtin.Builtin.Int64Literal, @thin Builtin.Int64.Type) -> Builtin.Int64 +// HECK: apply {{.*}}([[AINT]], [[BOX_MTYPE]]) : $@convention(method) <τ_0_0> (@in τ_0_0, @thin Box<τ_0_0>.Type) -> @out Box<τ_0_0> +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss13f250_testBoxTyyF' +func f250_testBoxT() { + let _ = Box(t: Int64(42)) +} + +enum AddressOnlyEnum { + case nought + case mere(EmptyP) + case phantom(AddressOnlyStruct) +} + +// Tests Address only enums +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss15f260_AOnly_enumyys17AddressOnlyStructVF : $@convention(thin) (AddressOnlyStruct) -> () { +// CHECK: bb0([[ARG:%.*]] : $AddressOnlyStruct): +// HECK: [[MTYPE1:%.*]] = metatype $@thin AddressOnlyEnum.Type +// HECK: [[APPLY1:%.*]] = apply {{.*}}([[MTYPE1]]) : $@convention(thin) (@thin AddressOnlyEnum.Type) -> @owned @callee_guaranteed (@in_guaranteed EmptyP) -> @out AddressOnlyEnum +// HECK: destroy_value [[APPLY1]] +// HECK: [[MTYPE2:%.*]] = metatype $@thin AddressOnlyEnum.Type +// HECK: [[ENUM1:%.*]] = enum $AddressOnlyEnum, #AddressOnlyEnum.nought!enumelt +// HECK: [[MTYPE3:%.*]] = metatype $@thin AddressOnlyEnum.Type +// HECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[ARG]] : $AddressOnlyStruct, $AddressOnlyStruct, $EmptyP +// HECK: [[ENUM2:%.*]] = enum $AddressOnlyEnum, #AddressOnlyEnum.mere!enumelt, [[INIT_OPAQUE]] : $EmptyP +// HECK: destroy_value [[ENUM2]] +// HECK: [[MTYPE4:%.*]] = metatype $@thin AddressOnlyEnum.Type +// HECK: [[ENUM3:%.*]] = enum $AddressOnlyEnum, #AddressOnlyEnum.phantom!enumelt, [[ARG]] : $AddressOnlyStruct +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss15f260_AOnly_enumyys17AddressOnlyStructVF' +func f260_AOnly_enum(_ s: AddressOnlyStruct) { + _ = AddressOnlyEnum.mere + + _ = AddressOnlyEnum.nought + + _ = AddressOnlyEnum.mere(s) + + _ = AddressOnlyEnum.phantom(s) +} + +// Tests InjectOptional for 
opaque value types + conversion of opaque structs +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss21f270_convOptAnyStructyys0dE0VACSgcF : $@convention(thin) (@guaranteed @callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct) -> () { +// HECK: bb0([[ARG:%.*]] : $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@in_guaranteed Optional, @guaranteed @callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct) -> @out Optional +// HECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (@in_guaranteed Optional) -> @out Optional +// HECK-NOT: destroy_value [[ARG]] : $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss21f270_convOptAnyStructyys0dE0VACSgcF' +func f270_convOptAnyStruct(_ a1: @escaping (AnyStruct?) -> AnyStruct) { + let _: (AnyStruct?) -> AnyStruct? 
= a1 +} + +// f270_convOptAnyStruct continued Test: reabstraction thunk helper +// --- +// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] [ossa] @$ss9AnyStructVSgABIegnr_A2CIegnr_TR : $@convention(thin) (@in_guaranteed Optional, @guaranteed @callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct) -> @out Optional { +// CHECK: bb0([[ARG0:%.*]] : @guaranteed $Optional, [[ARG1:%.*]] : @guaranteed $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct): +// HECK: [[APPLYARG:%.*]] = apply [[ARG1]]([[ARG0]]) : $@callee_guaranteed (@in_guaranteed Optional) -> @out AnyStruct +// HECK: [[RETVAL:%.*]] = enum $Optional, #Optional.some!enumelt, [[APPLYARG]] : $AnyStruct +// HECK: return [[RETVAL]] : $Optional +// CHECK-LABEL: } // end sil function '$ss9AnyStructVSgABIegnr_A2CIegnr_TR' + +// Tests conversion between existential types +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss21f280_convExistTrivialyys0D6StructVs1P_pcF : $@convention(thin) (@guaranteed @callee_guaranteed (@in_guaranteed P) -> TrivialStruct) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $@callee_guaranteed (@in_guaranteed P) -> TrivialStruct): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@in_guaranteed P2, @guaranteed @callee_guaranteed (@in_guaranteed P) -> TrivialStruct) -> @out P2 +// HECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (@in_guaranteed P2) -> @out P2 +// CHECK-NOT: destroy_value [[ARG]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss21f280_convExistTrivialyys0D6StructVs1P_pcF' +func f280_convExistTrivial(_ s: @escaping (P) -> TrivialStruct) { + let _: (P2) -> P2 = s +} + +// part of f280_convExistTrivial: conversion between existential types - reabstraction thunk +// --- +// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] [ossa] @$ss1P_ps13TrivialStructVIegnd_s2P2_psAD_pIegnr_TR : 
$@convention(thin) (@in_guaranteed P2, @guaranteed @callee_guaranteed (@in_guaranteed P) -> TrivialStruct) -> @out P2 {
+// CHECK: bb0([[ARG0:%.*]] : @guaranteed $P2, [[ARG1:%.*]] : @guaranteed $@callee_guaranteed (@in_guaranteed P) -> TrivialStruct):
+// HECK: [[OPENED_ARG:%.*]] = open_existential_value [[ARG]] : $P2 to $@opened({{.*}}) P2
+// HECK: [[COPIED_VAL:%.*]] = copy_value [[OPENED_ARG]]
+// HECK: [[INIT_P:%.*]] = init_existential_value [[COPIED_VAL]] : $@opened({{.*}}) P2, $@opened({{.*}}) P2, $P
+// HECK: [[BORROWED_INIT_P:%.*]] = begin_borrow [[INIT_P]]
+// HECK: [[APPLY_P:%.*]] = apply [[ARG1]]([[BORROWED_INIT_P]]) : $@callee_guaranteed (@in_guaranteed P) -> TrivialStruct
+// HECK: [[RETVAL:%.*]] = init_existential_value [[APPLY_P]] : $TrivialStruct, $TrivialStruct, $P2
+// HECK: end_borrow [[BORROWED_INIT_P]]
+// CHECK-NOT: destroy_value [[ARG0]]
+// HECK: return [[RETVAL]] : $P2
+// CHECK-LABEL: } // end sil function '$ss1P_ps13TrivialStructVIegnd_s2P2_psAD_pIegnr_TR'
+
+// Tests conversion between existential types - optionals case
+// ---
+// CHECK-LABEL: sil hidden [ossa] @$ss21f290_convOptExistTrivyys13TrivialStructVs1P_pSgcF : $@convention(thin) (@guaranteed @callee_guaranteed (@in_guaranteed Optional<P>) -> TrivialStruct) -> () {
+// CHECK: bb0([[ARG:%.*]] : @guaranteed $@callee_guaranteed (@in_guaranteed Optional<P>) -> TrivialStruct):
+// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]]
+// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (Optional<TrivialStruct>, @guaranteed @callee_guaranteed (@in_guaranteed Optional<P>) -> TrivialStruct) -> @out P2
+// HECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (Optional<TrivialStruct>) -> @out P2
+// CHECK-NOT: destroy_value [[ARG]]
+// HECK: return %{{.*}} : $()
+// CHECK-LABEL: } // end sil function '$ss21f290_convOptExistTrivyys13TrivialStructVs1P_pSgcF'
+func f290_convOptExistTriv(_ s: @escaping (P?) -> TrivialStruct) {
+  let _: (TrivialStruct?) -> P2 = s
+}
+
+// part of f290_convOptExistTriv: conversion between existential types - reabstraction thunk - optionals case
+// ---
+// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] [ossa] @$ss1P_pSgs13TrivialStructVIegnd_ADSgs2P2_pIegyr_TR : $@convention(thin) (Optional<TrivialStruct>, @guaranteed @callee_guaranteed (@in_guaranteed Optional<P>) -> TrivialStruct) -> @out P2 {
+// CHECK: bb0([[ARG0:%.*]] : $Optional<TrivialStruct>, [[ARG1:%.*]] : @guaranteed $@callee_guaranteed (@in_guaranteed Optional<P>) -> TrivialStruct):
+// HECK: switch_enum [[ARG0]] : $Optional<TrivialStruct>, case #Optional.some!enumelt: bb2, case #Optional.none!enumelt: bb1
+// HECK: bb1:
+// HECK: [[ONONE:%.*]] = enum $Optional<P>, #Optional.none!enumelt
+// HECK: br bb3([[ONONE]] : $Optional<P>)
+// HECK: bb2([[OSOME:%.*]] : $TrivialStruct):
+// HECK: [[INIT_S:%.*]] = init_existential_value [[OSOME]] : $TrivialStruct, $TrivialStruct, $P
+// HECK: [[ENUM_S:%.*]] = enum $Optional<P>, #Optional.some!enumelt, [[INIT_S]] : $P
+// HECK: br bb3([[ENUM_S]] : $Optional<P>)
+// HECK: bb3([[OPT_S:%.*]] : $Optional<P>):
+// HECK: [[BORROWED_OPT_S:%.*]] = begin_borrow [[OPT_S]]
+// HECK: [[APPLY_P:%.*]] = apply [[ARG1]]([[BORROWED_OPT_S]]) : $@callee_guaranteed (@in_guaranteed Optional<P>) -> TrivialStruct
+// HECK: [[RETVAL:%.*]] = init_existential_value [[APPLY_P]] : $TrivialStruct, $TrivialStruct, $P2
+// HECK: return [[RETVAL]] : $P2
+// CHECK-LABEL: } // end sil function '$ss1P_pSgs13TrivialStructVIegnd_ADSgs2P2_pIegyr_TR'
+
+// Tests corner-case: reabstraction of an empty tuple to any
+// ---
+// CHECK-LABEL: sil hidden [ossa] @$ss20f300_convETupleToAnyyyyycF : $@convention(thin) (@guaranteed @callee_guaranteed () -> ()) -> () {
+// CHECK: bb0([[ARG:%.*]] : @guaranteed $@callee_guaranteed () -> ()):
+// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]]
+// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@guaranteed @callee_guaranteed () -> ()) -> @out Any
+// HECK: destroy_value [[PAPPLY]] : $@callee_guaranteed () -> @out Any
+// CHECK-NOT: destroy_value [[ARG]]
+// HECK: return %{{.*}} : $()
+// CHECK-LABEL: } // end sil function '$ss20f300_convETupleToAnyyyyycF'
+func f300_convETupleToAny(_ t: @escaping () -> ()) {
+  let _: () -> Any = t
+}
+
+// f300_convETupleToAny continued Test: reabstraction of () to Any
+// ---
+// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] [ossa] @$sIeg_ypIegr_TR : $@convention(thin) (@guaranteed @callee_guaranteed () -> ()) -> @out Any {
+// CHECK: bb0([[ARG:%.*]] : @guaranteed $@callee_guaranteed () -> ()):
+// HECK: [[ASTACK:%.*]] = alloc_stack $Any
+// HECK: [[IADDR:%.*]] = init_existential_addr [[ASTACK]] : $*Any, $()
+// HECK: [[APPLYARG:%.*]] = apply [[ARG]]() : $@callee_guaranteed () -> ()
+// HECK: [[LOAD_EXIST:%.*]] = load [trivial] [[IADDR]] : $*()
+// HECK: [[RETVAL:%.*]] = init_existential_value [[LOAD_EXIST]] : $(), $(), $Any
+// HECK: return [[RETVAL]] : $Any
+// CHECK-LABEL: } // end sil function '$sIeg_ypIegr_TR'
+
+// Tests corner-case: reabstraction of a non-empty tuple to any
+// ---
+// CHECK-LABEL: sil hidden [ossa] @$ss21f310_convnIntTupleAnyyyBi64__Bi64_tycF : $@convention(thin) (@guaranteed @callee_guaranteed () -> 
(Builtin.Int64, Builtin.Int64)) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $@callee_guaranteed () -> (Builtin.Int64, Builtin.Int64)): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (@guaranteed @callee_guaranteed () -> (Builtin.Int64, Builtin.Int64)) -> @out Any +// HECK: destroy_value [[PAPPLY]] : $@callee_guaranteed () -> @out Any +// CHECK-NOT: destroy_value [[ARG]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss21f310_convnIntTupleAnyyyBi64__Bi64_tycF' +func f310_convnIntTupleAny(_ t: @escaping () -> (Builtin.Int64, Builtin.Int64)) { + let _: () -> Any = t +} + +// f310_convIntTupleAny continued Test: reabstraction of non-empty tuple to Any +// --- +// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] [ossa] @$sBi64_Bi64_Iegdd_ypIegr_TR : $@convention(thin) (@guaranteed @callee_guaranteed () -> (Builtin.Int64, Builtin.Int64)) -> @out Any { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $@callee_guaranteed () -> (Builtin.Int64, Builtin.Int64)): +// HECK: [[ASTACK:%.*]] = alloc_stack $Any +// HECK: [[IADDR:%.*]] = init_existential_addr [[ASTACK]] : $*Any, $(Builtin.Int64, Builtin.Int64) +// HECK: [[TADDR0:%.*]] = tuple_element_addr [[IADDR]] : $*(Builtin.Int64, Builtin.Int64), 0 +// HECK: [[TADDR1:%.*]] = tuple_element_addr [[IADDR]] : $*(Builtin.Int64, Builtin.Int64), 1 +// HECK: [[APPLYARG:%.*]] = apply [[ARG]]() : $@callee_guaranteed () -> (Builtin.Int64, Builtin.Int64) +// HECK: [[TEXTRACT0:%.*]] = tuple_extract [[APPLYARG]] : $(Builtin.Int64, Builtin.Int64), 0 +// HECK: [[TEXTRACT1:%.*]] = tuple_extract [[APPLYARG]] : $(Builtin.Int64, Builtin.Int64), 1 +// HECK: store [[TEXTRACT0]] to [trivial] [[TADDR0]] : $*Builtin.Int64 +// HECK: store [[TEXTRACT1]] to [trivial] [[TADDR1]] : $*Builtin.Int64 +// HECK: [[LOAD_EXIST:%.*]] = load [trivial] [[IADDR]] : $*(Builtin.Int64, Builtin.Int64) +// HECK: 
[[RETVAL:%.*]] = init_existential_value [[LOAD_EXIST]] : $(Builtin.Int64, Builtin.Int64), $(Builtin.Int64, Builtin.Int64), $Any +// HECK: dealloc_stack [[ASTACK]] : $*Any +// HECK: return [[RETVAL]] : $Any +// CHECK-LABEL: } // end sil function '$sBi64_Bi64_Iegdd_ypIegr_TR' + +// Tests translating and imploding into Any under opaque value mode +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss20f320_transImplodeAnyyyyypcF : $@convention(thin) (@guaranteed @callee_guaranteed (@in_guaranteed Any) -> ()) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $@callee_guaranteed (@in_guaranteed Any) -> ()): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[COPY_ARG]]) : $@convention(thin) (Builtin.Int64, Builtin.Int64, @guaranteed @callee_guaranteed (@in_guaranteed Any) -> ()) -> () +// HECK: destroy_value [[PAPPLY]] : $@callee_guaranteed (Builtin.Int64, Builtin.Int64) -> () +// CHECK-NOT: destroy_value [[ARG]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss20f320_transImplodeAnyyyyypcF' +func f320_transImplodeAny(_ t: @escaping (Any) -> ()) { + let _: ((Builtin.Int64, Builtin.Int64)) -> () = t +} + +// f320_transImplodeAny continued Test: reabstraction thunk +// --- +// CHECK-LABEL: sil shared [transparent] [serialized] [reabstraction_thunk] [ossa] @$sypIegn_Bi64_Bi64_Iegyy_TR : $@convention(thin) (Builtin.Int64, Builtin.Int64, @guaranteed @callee_guaranteed (@in_guaranteed Any) -> ()) -> () { +// CHECK: bb0([[ARG0:%.*]] : $Builtin.Int64, [[ARG1:%.*]] : $Builtin.Int64, [[ARG2:%.*]] : @guaranteed $@callee_guaranteed (@in_guaranteed Any) -> ()): +// HECK: [[ASTACK:%.*]] = alloc_stack $Any +// HECK: [[IADDR:%.*]] = init_existential_addr [[ASTACK]] : $*Any, $(Builtin.Int64, Builtin.Int64) +// HECK: [[TADDR0:%.*]] = tuple_element_addr [[IADDR]] : $*(Builtin.Int64, Builtin.Int64), 0 +// HECK: store [[ARG0]] to [trivial] [[TADDR0]] : $*Builtin.Int64 +// HECK: [[TADDR1:%.*]] = 
tuple_element_addr [[IADDR]] : $*(Builtin.Int64, Builtin.Int64), 1 +// HECK: store [[ARG1]] to [trivial] [[TADDR1]] : $*Builtin.Int64 +// HECK: [[LOAD_EXIST:%.*]] = load [trivial] [[IADDR]] : $*(Builtin.Int64, Builtin.Int64) +// HECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[LOAD_EXIST]] : $(Builtin.Int64, Builtin.Int64), $(Builtin.Int64, Builtin.Int64), $Any +// HECK: [[BORROWED_INIT_OPAQUE:%.*]] = begin_borrow [[INIT_OPAQUE]] +// HECK: [[APPLYARG:%.*]] = apply [[ARG2]]([[BORROWED_INIT_OPAQUE]]) : $@callee_guaranteed (@in_guaranteed Any) -> () +// HECK: dealloc_stack [[ASTACK]] : $*Any +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$sypIegn_Bi64_Bi64_Iegyy_TR' + +// Tests support for address only let closures under opaque value mode - they are not by-address anymore +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss19f330_addrLetClosureyxxlF : $@convention(thin) (@in_guaranteed T) -> @out T { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $T): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] : $T +// HECK: return [[COPY_ARG]] : $T +// CHECK-LABEL: } // end sil function '$ss19f330_addrLetClosureyxxlF' +func f330_addrLetClosure(_ x:T) -> T { + return { { x }() }() +} + +// Tests support for capture of a mutable opaque value type +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss15f340_captureBoxyyF : $@convention(thin) () -> () { +// CHECK: bb0: +// HECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var EmptyP }, var, name "mutableAddressOnly" +// HECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] +// HECK: [[APPLY_FOR_BOX:%.*]] = apply %{{.*}}(%{{.*}}) : $@convention(method) (@thin AddressOnlyStruct.Type) -> AddressOnlyStruct +// HECK: [[INIT_OPAQUE:%.*]] = init_existential_value [[APPLY_FOR_BOX]] : $AddressOnlyStruct, $AddressOnlyStruct, $EmptyP +// HECK: store [[INIT_OPAQUE]] to [init] [[PROJ_BOX]] : $*EmptyP +// HECK: [[BORROW_BOX:%.*]] = begin_borrow [[ALLOC_OF_BOX]] : ${ var EmptyP } +// HECK: mark_function_escape [[PROJ_BOX]] : $*EmptyP +// HECK: 
apply %{{.*}}([[BORROW_BOX]]) : $@convention(thin) (@guaranteed { var EmptyP }) -> () +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss15f340_captureBoxyyF' +func f340_captureBox() { + var mutableAddressOnly: EmptyP = AddressOnlyStruct() + + func captureEverything() { + genericInout(&mutableAddressOnly) + } + + captureEverything() +} + +// Tests support for guards and indirect enums for opaque values +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss14f360_guardEnumyys08IndirectC0OyxGlF : $@convention(thin) (@guaranteed IndirectEnum) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $IndirectEnum): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: switch_enum [[COPY_ARG]] : $IndirectEnum, case #IndirectEnum.Node!enumelt: [[NODE_BB:bb[0-9]+]], case #IndirectEnum.Nil!enumelt: [[NIL_BB:bb[0-9]+]] +// +// HECK: [[NIL_BB]]: +// HECK: br [[NIL_TRAMPOLINE:bb[0-9]+]] +// +// HECK: [[NIL_TRAMPOLINE]]: +// HECK: br [[EPILOG_BB:bb[0-9]+]] +// +// HECK: [[NODE_BB]]([[EARG:%.*]] : $<τ_0_0> { var τ_0_0 } ): +// HECK: [[PROJ_BOX:%.*]] = project_box [[EARG]] +// HECK: [[LOAD_BOX:%.*]] = load [take] [[PROJ_BOX]] : $*T +// HECK: [[COPY_BOX:%.*]] = copy_value [[LOAD_BOX]] : $T +// HECK: destroy_value [[EARG]] +// HECK: br [[CONT_BB:bb[0-9]+]] +// +// HECK: [[CONT_BB]]: +// HECK: destroy_value [[COPY_BOX]] +// HECK: br [[EPILOG_BB]] +// +// HECK: [[EPILOG_BB]]: +// CHECK-NOT: destroy_value [[ARG]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss14f360_guardEnumyys08IndirectC0OyxGlF' +func f360_guardEnum(_ e: IndirectEnum) { + do { + guard case .Node(let x) = e else { return } + _ = x + } +} + +// Tests contextual init() of opaque value types +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss17f370_optToOptCastyxSgABlF : $@convention(thin) (@in_guaranteed Optional) -> @out Optional { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Optional): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// CHECK-NOT: destroy_value [[ARG]] +// HECK: 
return [[COPY_ARG]] : $Optional +// CHECK-LABEL: } // end sil function '$ss17f370_optToOptCastyxSgABlF' +func f370_optToOptCast(_ x : T!) -> T? { + return x +} + +// Tests casting optional opaques to optional opaques +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss19f380_contextualInityyBi64_SgF : $@convention(thin) (Optional) -> () { +// CHECK: bb0([[ARG:%.*]] : $Optional): +// HECK: [[ALLOC_OF_BOX:%.*]] = alloc_box ${ var Optional }, var +// HECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] +// HECK: store [[ARG]] to [trivial] [[PROJ_BOX]] : $*Optional +// HECK: destroy_value [[ALLOC_OF_BOX]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss19f380_contextualInityyBi64_SgF' +func f380_contextualInit(_ a : Builtin.Int64?) { + var x: Builtin.Int64? = a + genericInout(&x) + _ = x +} + +// Tests opaque call result types +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss19f390_addrCallResultyyxycSglF : $@convention(thin) (@guaranteed Optional<@callee_guaranteed @substituted <τ_0_0> () -> @out τ_0_0 for >) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Optional<@callee_guaranteed @substituted <τ_0_0> () -> @out τ_0_0 for >): +// HECK: [[ALLOC_OF_BOX:%.*]] = alloc_box $<τ_0_0> { var Optional<τ_0_0> } +// HECK: [[PROJ_BOX:%.*]] = project_box [[ALLOC_OF_BOX]] +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: [[SENUM:%.*]] = select_enum [[COPY_ARG]] +// HECK: cond_br [[SENUM]], bb3, bb1 +// HECK: bb1: +// HECK: br bb2 +// HECK: bb2: +// HECK: [[ONONE:%.*]] = enum $Optional, #Optional.none!enumelt +// HECK: br bb4([[ONONE]] : $Optional) +// HECK: bb4(%{{.*}} : $Optional): +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss19f390_addrCallResultyyxycSglF' +func f390_addrCallResult(_ f: (() -> T)?) 
{ + var x = f?() + genericInout(&x) + _ = x +} + +// Tests reabstraction / partial apply of protocols under opaque value mode +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss16f400_maybeCloneP1cys8Clonable_p_tF : $@convention(thin) (@in_guaranteed Clonable) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Clonable): +// HECK: [[OPEN_ARG:%.*]] = open_existential_value [[ARG]] : $Clonable +// HECK: [[APPLY_OPAQUE:%.*]] = apply %{{.*}}<@opened({{.*}}) Clonable>([[OPEN_ARG]]) : $@convention(thin) <τ_0_0 where τ_0_0 : Clonable> (@in_guaranteed τ_0_0) -> @owned @callee_guaranteed () -> @out Optional<τ_0_0> +// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}<@opened({{.*}}) Clonable>([[APPLY_OPAQUE]]) : $@convention(thin) <τ_0_0 where τ_0_0 : Clonable> (@guaranteed @callee_guaranteed () -> @out Optional<τ_0_0>) -> @out Optional +// CHECK-NOT: destroy_value [[ARG]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss16f400_maybeCloneP1cys8Clonable_p_tF' +func f400_maybeCloneP(c: Clonable) { + let _: () -> Clonable? 
= c.maybeClone +} + +// Tests global opaque values / subscript rvalues +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss20f410_globalRvalueGetyBi64_Bi64_F : $@convention(thin) (Builtin.Int64) -> Builtin.Int64 { +// CHECK: bb0([[ARG:%.*]] : $Builtin.Int64): +// HECK: [[GLOBAL_ADDR:%.*]] = global_addr @$s20opaque_values_silgen16subscriptableGetAA013SubscriptableE0_pvp : $*SubscriptableGet +// HECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL_ADDR]] : $*SubscriptableGet +// HECK: [[OPEN_ARG:%.*]] = open_existential_addr immutable_access [[READ]] : $*SubscriptableGet to $*@opened +// HECK: [[GET_OPAQUE:%.*]] = load [copy] [[OPEN_ARG]] : $*@opened +// HECK: [[RETVAL:%.*]] = apply %{{.*}}<@opened({{.*}}) SubscriptableGet>([[ARG]], [[GET_OPAQUE]]) : $@convention(witness_method: SubscriptableGet) <τ_0_0 where τ_0_0 : SubscriptableGet> (Builtin.Int64, @in_guaranteed τ_0_0) -> Builtin.Int64 +// HECK: destroy_value [[GET_OPAQUE]] +// HECK: return [[RETVAL]] : $Builtin.Int64 +// CHECK-LABEL: } // end sil function '$ss20f410_globalRvalueGetyBi64_Bi64_F' +func f410_globalRvalueGet(_ i : Builtin.Int64) -> Builtin.Int64 { + return subscriptableGet![i] +} + +// Tests global opaque values / subscript lvalues +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss20f420_globalLvalueGetyBi64_SgBi64_F : $@convention(thin) (Builtin.Int64) -> Optional { +// CHECK: bb0([[ARG:%.*]] : $Builtin.Int64): +// HECK: [[GLOBAL_ADDR:%.*]] = global_addr @$s20opaque_values_silgen19subscriptableGetSetAA013SubscriptableeF0_pvp : $*SubscriptableGetSet +// HECK: [[READ:%.*]] = begin_access [read] [dynamic] [[GLOBAL_ADDR]] : $*SubscriptableGetSet +// HECK: [[OPEN_ARG:%.*]] = open_existential_addr immutable_access [[READ]] : $*SubscriptableGetSet to $*@opened +// HECK: [[GET_OPAQUE:%.*]] = load [copy] [[OPEN_ARG]] : $*@opened +// HECK: [[RETVAL:%.*]] = apply %{{.*}}<@opened({{.*}}) SubscriptableGetSet>([[ARG]], [[GET_OPAQUE]]) : $@convention(witness_method: SubscriptableGetSet) <τ_0_0 where τ_0_0 : 
SubscriptableGetSet> (Builtin.Int64, @in_guaranteed τ_0_0) -> Builtin.Int64 +// HECK: destroy_value [[GET_OPAQUE]] +// HECK: return [[RETVAL]] : $Builtin.Int64 +// CHECK-LABEL: } // end sil function '$ss20f420_globalLvalueGetyBi64_SgBi64_F' +func f420_globalLvalueGet(_ i : Builtin.Int64) -> Builtin.Int64? { + return subscriptableGetSet![i] +} + +// Tests tuple transformation +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss21f430_callUnreachableF1tyx_tlF : $@convention(thin) (@in_guaranteed T) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $T): +// HECK: [[APPLY_T:%.*]] = apply %{{.*}}<((T) -> (), T)>() : $@convention(thin) <τ_0_0> () -> @out Optional<(Builtin.Int64, τ_0_0)> +// HECK: switch_enum [[APPLY_T]] : $Optional<(Builtin.Int64, (@callee_guaranteed (@in_guaranteed T) -> @out (), T))>, case #Optional.some!enumelt: bb2, case #Optional.none!enumelt: bb1 +// HECK: bb2([[ENUMARG:%.*]] : $(Builtin.Int64, (@callee_guaranteed (@in_guaranteed T) -> @out (), T))): +// HECK: ([[TELEM0:%.*]], [[TELEM1:%.*]]) = destructure_tuple [[ENUMARG]] : $(Builtin.Int64, (@callee_guaranteed (@in_guaranteed T) -> @out (), T)) +// HECK: ([[TELEM10:%.*]], [[TELEM11:%.*]]) = destructure_tuple [[TELEM1]] : $(@callee_guaranteed (@in_guaranteed T) -> @out (), T) +// HECK: [[PAPPLY:%.*]] = partial_apply [callee_guaranteed] %{{.*}}([[TELEM10]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0, @guaranteed @callee_guaranteed (@in_guaranteed τ_0_0) -> @out ()) -> () +// HECK: [[NEWT0:%.*]] = tuple ([[PAPPLY]] : $@callee_guaranteed (@in_guaranteed T) -> (), [[TELEM11]] : $T) +// HECK: [[NEWT1:%.*]] = tuple ([[TELEM0]] : $Builtin.Int64, [[NEWT0]] : $(@callee_guaranteed (@in_guaranteed T) -> (), T)) +// HECK: [[NEWENUM:%.*]] = enum $Optional<(Builtin.Int64, (@callee_guaranteed (@in_guaranteed T) -> (), T))>, #Optional.some!enumelt, [[NEWT1]] : $(Builtin.Int64, (@callee_guaranteed (@in_guaranteed T) -> (), T)) +// HECK: br bb3([[NEWENUM]] : $Optional<(Builtin.Int64, (@callee_guaranteed 
(@in_guaranteed T) -> (), T))>) +// HECK: bb3([[ENUMIN:%.*]] : $Optional<(Builtin.Int64, (@callee_guaranteed (@in_guaranteed T) -> (), T))>): +// HECK: destroy_value [[ENUMIN]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss21f430_callUnreachableF1tyx_tlF' +func f430_callUnreachableF(t: T) { + let _: (Builtin.Int64, ((T) -> (), T))? = unreachableF() +} + + +// Further testing for conditional checked cast under opaque value mode - make sure we don't create a buffer for results +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss20f440_cleanupEmissionyyxlF : $@convention(thin) (@in_guaranteed T) -> () { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $T): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: checked_cast_value_br [[COPY_ARG]] : $T to $EmptyP, bb2, bb1 +// +// HECK: bb2([[PTYPE:%.*]] : $EmptyP): +// HECK: [[PSOME:%.*]] = enum $Optional, #Optional.some!enumelt, [[PTYPE]] : $EmptyP +// HECK: br bb3([[PSOME]] : $Optional) +// +// HECK: bb3([[ENUMRES:%.*]] : $Optional): +// HECK: switch_enum [[ENUMRES]] : $Optional, case #Optional.some!enumelt: [[SOME_BB:bb[0-9]+]], case #Optional.none!enumelt: [[NONE_BB:bb[0-9]+]] +// +// HECK: [[NONE_BB]]: +// HECK: br [[NONE_TRAMPOLINE:bb[0-9]+]] +// +// HECK: [[NONE_TRAMPOLINE]]: +// HECK: br [[EPILOG_BB:bb[0-9]+]] +// +// HECK: [[SOME_BB]]([[ENUMRES2:%.*]] : $EmptyP): +// HECK: br [[CONT_BB:bb[0-9]+]] +// +// HECK: [[CONT_BB]]: +// HECK: destroy_value [[ENUMRES2]] +// HECK: br [[EPILOG_BB]] +// +// HECK: [[EPILOG_BB]]: +// CHECK-NOT: destroy_value [[ARG]] +// HECK: return %{{.*}} : $() +// CHECK-LABEL: } // end sil function '$ss20f440_cleanupEmissionyyxlF' +func f440_cleanupEmission(_ x: T) { + guard let x2 = x as? EmptyP else { return } + _ = x2 +} + + +// Test emitNativeToCBridgedNonoptionalValue. 
+// --- +// CHECK-objc-LABEL: sil hidden [ossa] @$ss14f470_nativeToC7fromAnyyXlyp_tF : $@convention(thin) (@in_guaranteed Any) -> @owned AnyObject { +// CHECK-objc: bb0(%0 : $Any): +// CHECK-objc: [[BORROW:%.*]] = begin_borrow %0 : $Any +// CHECK-objc: [[SRC:%.*]] = copy_value [[BORROW]] : $Any +// CHECK-objc: [[OPEN:%.*]] = open_existential_opaque [[SRC]] : $Any to $@opened +// CHECK-objc: [[COPY:%.*]] = copy_value [[OPEN]] : $@opened +// CHECK-objc: [[F:%.*]] = function_ref @$sf27_bridgeAnythingToObjectiveCyyXlxlF : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> @owned AnyObject +// CHECK-objc: [[RET:%.*]] = apply [[F]]<@opened("{{.*}}") Any>([[COPY]]) : $@convention(thin) <τ_0_0> (@in_guaranteed τ_0_0) -> @owned AnyObject +// CHECK-objc: destroy_value [[SRC]] : $Any +// CHECK-objc: destroy_value %0 : $Any +// CHECK-objc: return [[RET]] : $AnyObject +// CHECK-objc-LABEL: } // end sil function '$ss14f470_nativeToC7fromAnyyXlyp_tF' +#if _runtime(_ObjC) +func f470_nativeToC(fromAny any: Any) -> AnyObject { + return any as AnyObject +} +#endif + + +// Test emitOpenExistential. +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss13f480_getError04someC0yps0C0_p_tF : $@convention(thin) (@guaranteed Error) -> @out Any { +// CHECK: bb0([[ARG:%.*]] : @guaranteed $Error): +// HECK: [[VAL:%.*]] = open_existential_box_value [[ARG]] : $Error to $@opened("{{.*}}") Error +// HECK: [[COPY:%.*]] = copy_value [[VAL]] : $@opened("{{.*}}") Error +// HECK: [[ANY:%.*]] = init_existential_value [[COPY]] : $@opened("{{.*}}") Error, $@opened("{{.*}}") Error, $Any +// CHECK-NOT: destroy_value [[ARG]] : $Error +// HECK: return [[ANY]] : $Any +// CHECK-LABEL: } // end sil function '$ss13f480_getError04someC0yps0C0_p_tF' +func f480_getError(someError: Error) -> Any { + return someError +} + +// Test visitBindOptionalExpr +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss15f500_getAnyHashys1P_pSgs14ConvertibleToP_pSgF : $@convention(thin) (@in_guaranteed Optional) -> @out Optional

{ +// CHECK: bb0(%0 : @guaranteed $Optional): +// HECK: [[COPY:%.*]] = copy_value [[ARG]] : $Optional +// HECK: [[DATA:%.*]] = unchecked_enum_data [[COPY]] : $Optional, #Optional.some!enumelt +// HECK: [[BORROW_DATA:%.*]] = begin_borrow [[DATA]] : $ConvertibleToP +// HECK: [[VAL:%.*]] = open_existential_value [[BORROW_DATA]] : $ConvertibleToP to $@opened("{{.*}}") ConvertibleToP +// HECK: [[WT:%.*]] = witness_method $@opened("{{.*}}") ConvertibleToP, #ConvertibleToP.asP : (Self) -> () -> P, [[VAL]] : $@opened("{{.*}}") ConvertibleToP : $@convention(witness_method: ConvertibleToP) <τ_0_0 where τ_0_0 : ConvertibleToP> (@in_guaranteed τ_0_0) -> @out P +// HECK: [[AS_P:%.*]] = apply [[WT]]<@opened("{{.*}}") ConvertibleToP>([[VAL]]) : $@convention(witness_method: ConvertibleToP) <τ_0_0 where τ_0_0 : ConvertibleToP> (@in_guaranteed τ_0_0) -> @out P +// HECK: [[ENUM:%.*]] = enum $Optional

, #Optional.some!enumelt, [[AS_P]] : $P +// HECK: destroy_value [[DATA]] : $ConvertibleToP +// HECK: br bb{{.*}}([[ENUM]] : $Optional

) +// HECK: } // end sil function '$ss15f500_getAnyHashys1P_pSgs14ConvertibleToP_pSgF' +func f500_getAnyHash(_ value: ConvertibleToP?) -> P? { + return value?.asP() +} +public protocol FooPP { + func foo() -> Self +} + +// Test emitting a protocol witness for a method (with @in_guaranteed self) on a dependent generic type. +// --- +// CHECK-LABEL: sil private [transparent] [thunk] [ossa] @$ss15f510_OpaqueSelfVyxGs5FooPPssADP3fooxyFTW : $@convention(witness_method: FooPP) <τ_0_0> (@in_guaranteed f510_OpaqueSelf<τ_0_0>) -> @out f510_OpaqueSelf<τ_0_0> { +// CHECK: bb0(%0 : @guaranteed $f510_OpaqueSelf<τ_0_0>): +// HECK: [[FN:%.*]] = function_ref @$s20opaque_values_silgen21f510_OpaqueSelfV3fooACyxGyF : $@convention(method) <τ_0_0> (@in_guaranteed f510_OpaqueSelf<τ_0_0>) -> @out f510_OpaqueSelf<τ_0_0> +// HECK: [[RESULT:%.*]] = apply [[FN]]<τ_0_0>(%0) : $@convention(method) <τ_0_0> (@in_guaranteed f510_OpaqueSelf<τ_0_0>) -> @out f510_OpaqueSelf<τ_0_0> +// HECK: return [[RESULT]] : $f510_OpaqueSelf<τ_0_0> +// CHECK-LABEL: } // end sil function '$ss15f510_OpaqueSelfVyxGs5FooPPssADP3fooxyFTW' +struct f510_OpaqueSelf : FooPP { + var x: Base + + func foo() -> f510_OpaqueSelf { + return self + } +} + +// Tests conditional value casts and correspondingly generated reabstraction thunk, with types +// --- +// CHECK-LABEL: sil hidden [ossa] @$ss17f520_condTFromAnyyyyp_xtlF : $@convention(thin) (@in_guaranteed Any, @in_guaranteed T) -> () { +// CHECK: bb0([[ARG0:%.*]] : @guaranteed $Any, [[ARG1:%.*]] : @guaranteed $T): +// HECK: [[COPY_ARG:%.*]] = copy_value [[ARG]] +// HECK: checked_cast_value_br [[COPY_ARG]] : $Any to $@callee_guaranteed (@in_guaranteed (Builtin.Int64, T)) -> @out (Builtin.Int64, T), bb2, bb1 +// HECK: bb2([[THUNK_PARAM:%.*]] : $@callee_guaranteed (@in_guaranteed (Builtin.Int64, T)) -> @out (Builtin.Int64, T)): +// HECK: [[THUNK_REF:%.*]] = function_ref @{{.*}} : $@convention(thin) <τ_0_0> (Builtin.Int64, @in_guaranteed τ_0_0, @guaranteed @callee_guaranteed 
(@in_guaranteed (Builtin.Int64, τ_0_0)) -> @out (Builtin.Int64, τ_0_0)) -> (Builtin.Int64, @out τ_0_0)
+// HECK: partial_apply [callee_guaranteed] [[THUNK_REF]]([[THUNK_PARAM]])
+// CHECK: bb6:
+// HECK: return %{{.*}} : $()
+// CHECK-LABEL: } // end sil function '$ss17f520_condTFromAnyyyyp_xtlF'
+func f520_condTFromAny(_ x: Any, _ y: T) {
+  if let f = x as? (Int64, T) -> (Int64, T) {
+    _ = f(Int64(42), y)
+  }
+}
+
+// Make sure that we insert a destroy of the box even though we used a Builtin.Int64 type.
+// CHECK-LABEL: sil [ossa] @$ss16f530_assignToVaryyF : $@convention(thin) () -> () {
+// CHECK: bb0:
+// HECK: [[Y_BOX:%.*]] = alloc_box ${ var Builtin.Int64 }, var, name "y"
+// HECK: [[PROJECT_Y_BOX:%.*]] = project_box [[Y_BOX]] : ${ var Builtin.Int64 }, 0
+// HECK: [[X_BOX:%.*]] = alloc_box ${ var Any }, var, name "x"
+// HECK: [[PROJECT_X_BOX:%.*]] = project_box [[X_BOX]] : ${ var Any }, 0
+// HECK: [[ACCESS_PROJECT_Y_BOX:%.*]] = begin_access [read] [unknown] [[PROJECT_Y_BOX]] : $*Builtin.Int64
+// HECK: [[Y:%.*]] = load [trivial] [[ACCESS_PROJECT_Y_BOX]] : $*Builtin.Int64
+// HECK: [[Y_ANY_FOR_X:%.*]] = init_existential_value [[Y]] : $Builtin.Int64, $Builtin.Int64, $Any
+// HECK: store [[Y_ANY_FOR_X]] to [init] [[PROJECT_X_BOX]]
+// HECK: [[ACCESS_PROJECT_Y_BOX:%.*]] = begin_access [read] [unknown] [[PROJECT_Y_BOX]] : $*Builtin.Int64
+// HECK: [[Y:%.*]] = load [trivial] [[ACCESS_PROJECT_Y_BOX]] : $*Builtin.Int64
+// HECK: [[Y_ANY_FOR_Z:%.*]] = init_existential_value [[Y]] : $Builtin.Int64, $Builtin.Int64, $Any
+// HECK: destroy_value [[Y_ANY_FOR_Z]]
+// HECK: destroy_value [[X_BOX]]
+// HECK: destroy_value [[Y_BOX]]
+// HECK: } // end sil function '$ss16f530_assignToVaryyF'
+public func f530_assignToVar() {
+  var y: Int64 = 3
+  var x: Any = y
+  let z: Any = y
+  genericInout(&y)
+  genericInout(&x)
+  _ = z
+}
+
+// Test open_existential_value ownership
+// ---
+// CHECK-LABEL: sil [ossa] @$ss16f540_takeDecoder4fromBi1_s0C0_p_tKF : $@convention(thin)
(@in_guaranteed Decoder) -> (Builtin.Int1, @error Error) { +// CHECK: bb0(%0 : @guaranteed $Decoder): +// HECK: [[OPENED:%.*]] = open_existential_value %0 : $Decoder to $@opened("{{.*}}") Decoder +// HECK: [[WT:%.*]] = witness_method $@opened("{{.*}}") Decoder, #Decoder.unkeyedContainer : (Self) -> () throws -> UnkeyedDecodingContainer, %3 : $@opened("{{.*}}") Decoder : $@convention(witness_method: Decoder) <τ_0_0 where τ_0_0 : Decoder> (@in_guaranteed τ_0_0) -> (@out UnkeyedDecodingContainer, @error Error) +// HECK: try_apply [[WT]]<@opened("{{.*}}") Decoder>([[OPENED]]) : $@convention(witness_method: Decoder) <τ_0_0 where τ_0_0 : Decoder> (@in_guaranteed τ_0_0) -> (@out UnkeyedDecodingContainer, @error Error), normal bb2, error bb1 +// +// CHECK:bb{{.*}}([[RET1:%.*]] : @owned $UnkeyedDecodingContainer): +// HECK: [[BORROW2:%.*]] = begin_borrow [lexical] [[RET1]] : $UnkeyedDecodingContainer +// HECK: [[OPENED2:%.*]] = open_existential_value [[BORROW2]] : $UnkeyedDecodingContainer to $@opened("{{.*}}") UnkeyedDecodingContainer +// HECK: [[WT2:%.*]] = witness_method $@opened("{{.*}}") UnkeyedDecodingContainer, #UnkeyedDecodingContainer.isAtEnd!getter : (Self) -> () -> Builtin.Int1, [[OPENED2]] : $@opened("{{.*}}") UnkeyedDecodingContainer : $@convention(witness_method: UnkeyedDecodingContainer) <τ_0_0 where τ_0_0 : UnkeyedDecodingContainer> (@in_guaranteed τ_0_0) -> Builtin.Int1 +// HECK: [[RET2:%.*]] = apply [[WT2]]<@opened("{{.*}}") UnkeyedDecodingContainer>([[OPENED2]]) : $@convention(witness_method: UnkeyedDecodingContainer) <τ_0_0 where τ_0_0 : UnkeyedDecodingContainer> (@in_guaranteed τ_0_0) -> Builtin.Int1 +// HECK: end_borrow [[BORROW2]] : $UnkeyedDecodingContainer +// HECK: destroy_value [[RET1]] : $UnkeyedDecodingContainer +// CHECK-NOT: destroy_value %0 : $Decoder +// HECK: return [[RET2]] : $Builtin.Int1 +// CHECK-LABEL: } // end sil function '$ss16f540_takeDecoder4fromBi1_s0C0_p_tKF' +public func f540_takeDecoder(from decoder: Decoder) throws -> 
Builtin.Int1 { + let container = try decoder.unkeyedContainer() + return container.isAtEnd +} diff --git a/test/SILGen/opaque_values_silgen_todo.swift b/test/SILGen/opaque_values_silgen_todo.swift deleted file mode 100644 index 24d10eb4a1b73..0000000000000 --- a/test/SILGen/opaque_values_silgen_todo.swift +++ /dev/null @@ -1,2 +0,0 @@ -// RUN: %target-swift-emit-silgen -enable-sil-opaque-values -emit-sorted-sil -Xllvm -sil-full-demangle %s | %FileCheck %s -// REQUIRES: EnableSILOpaqueValues diff --git a/test/SILGen/opaque_values_silgen_vtable.swift b/test/SILGen/opaque_values_silgen_vtable.swift new file mode 100644 index 0000000000000..0c3f59a8da4ff --- /dev/null +++ b/test/SILGen/opaque_values_silgen_vtable.swift @@ -0,0 +1,53 @@ +// RUN: %target-swift-emit-silgen -enable-sil-opaque-values -emit-sorted-sil -Xllvm -sil-full-demangle %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-runtime + +class OpaqueClass { + typealias ObnoxiousTuple = (T, (T.Type, (T) -> T)) + + func inAndOut(x: T) -> T { return x } + func variantOptionalityTuples(x: ObnoxiousTuple) -> ObnoxiousTuple? { return x } +} + +class OpaqueTupleClass: OpaqueClass<(U, U)> { + override func inAndOut(x: (U, U)) -> (U, U) { return x } +} + +class StillOpaqueClass: OpaqueClass { + override func variantOptionalityTuples(x: ObnoxiousTuple?) -> ObnoxiousTuple { return x! 
} +} + +// Test vtables - OpaqueTupleClass +// --- +// CHECK-LABEL: sil private [thunk] [ossa] @$s27opaque_values_silgen_vtable16OpaqueTupleClassC8inAndOut1xx_xtx_xt_tFAA0eG0CAdExx_tFTV : $@convention(method) <τ_0_0> (@in_guaranteed (τ_0_0, τ_0_0), @guaranteed OpaqueTupleClass<τ_0_0>) -> @out (τ_0_0, τ_0_0) { +// HECK: bb0([[ARG0:%.*]] : $(U, U), [[ARG1:%.*]] : $OpaqueTupleClass): +// HECK: ([[TELEM0:%.*]], [[TELEM1:%.*]]) = destructure_tuple [[ARG0]] : $(U, U) +// HECK: [[APPLY:%.*]] = apply {{.*}}([[TELEM0]], [[TELEM1]], [[ARG1]]) : $@convention(method) <τ_0_0> (@in_guaranteed τ_0_0, @in_guaranteed τ_0_0, @guaranteed OpaqueTupleClass<τ_0_0>) -> (@out τ_0_0, @out τ_0_0) +// HECK: [[BORROWED_CALL:%.*]] = begin_borrow [[APPLY]] +// HECK: [[BORROWED_CALL_EXT0:%.*]] = tuple_extract [[BORROWED_CALL]] : $(U, U), 0 +// HECK: [[RETVAL0:%.*]] = copy_value [[BORROWED_CALL_EXT0]] : $U +// HECK: [[BORROWED_CALL_EXT1:%.*]] = tuple_extract [[BORROWED_CALL]] : $(U, U), 1 +// HECK: [[RETVAL1:%.*]] = copy_value [[BORROWED_CALL_EXT1]] : $U +// HECK: end_borrow [[BORROWED_CALL]] +// HECK: [[RETVAL:%.*]] = tuple ([[RETVAL0]] : $U, [[RETVAL1]] : $U) +// HECK: return [[RETVAL]] +// CHECK-LABEL: } // end sil function '$s27opaque_values_silgen_vtable16OpaqueTupleClassC8inAndOut1xx_xtx_xt_tFAA0eG0CAdExx_tFTV' + +// Test vtables - StillOpaqueClass +// --- +// CHECK-LABEL: sil private [thunk] [ossa] @$s27opaque_values_silgen_vtable16StillOpaqueClassC24variantOptionalityTuples1xx_xm_xxcttx_xm_xxcttSg_tFAA0fG0CAdeFx_xm_xxctt_tFTV : $@convention(method) <τ_0_0> (@in_guaranteed τ_0_0, @thick τ_0_0.Type, @guaranteed @callee_guaranteed @substituted <τ_0_0, τ_0_1> (@in_guaranteed τ_0_0) -> @out τ_0_1 for <τ_0_0, τ_0_0>, @guaranteed StillOpaqueClass<τ_0_0>) -> @out Optional<(τ_0_0, (@thick τ_0_0.Type, @callee_guaranteed @substituted <τ_0_0, τ_0_1> (@in_guaranteed τ_0_0) -> @out τ_0_1 for <τ_0_0, τ_0_0>))> { +// HECK: bb0([[ARG0:%.*]] : $T, [[ARG1:%.*]] : $@thick T.Type, [[ARG2:%.*]] : 
$@callee_guaranteed (@in_guaranteed T) -> @out T, [[ARG3:%.*]] : $StillOpaqueClass): +// HECK: [[TELEM0:%.*]] = tuple ([[ARG1]] : $@thick T.Type, [[ARG2]] : $@callee_guaranteed (@in_guaranteed T) -> @out T) +// HECK: [[TELEM1:%.*]] = tuple ([[ARG0]] : $T, [[TELEM0]] : $(@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T)) +// HECK: [[ENUMOPT0:%.*]] = enum $Optional<(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T))>, #Optional.some!enumelt, [[TELEM1]] : $(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T)) +// HECK: [[APPLY:%.*]] = apply {{.*}}([[ENUMOPT0]], [[ARG3]]) : $@convention(method) <τ_0_0> (@in_guaranteed Optional<(τ_0_0, (@thick τ_0_0.Type, @callee_guaranteed (@in_guaranteed τ_0_0) -> @out τ_0_0))>, @guaranteed StillOpaqueClass<τ_0_0>) -> (@out τ_0_0, @thick τ_0_0.Type, @owned @callee_guaranteed (@in_guaranteed τ_0_0) -> @out τ_0_0) +// HECK: [[BORROWED_T:%.*]] = begin_borrow [[APPLY]] +// HECK: [[BORROWED_T_EXT0:%.*]] = tuple_extract [[BORROWED_T]] : $(T, @thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T), 0 +// HECK: [[RETVAL0:%.*]] = copy_value [[BORROWED_T_EXT0]] +// HECK: [[BORROWED_T_EXT1:%.*]] = tuple_extract [[BORROWED_T]] : $(T, @thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T), 1 +// HECK: [[BORROWED_T_EXT2:%.*]] = tuple_extract [[BORROWED_T]] : $(T, @thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T), 2 +// HECK: [[RETVAL1:%.*]] = copy_value [[BORROWED_T_EXT2]] +// HECK: end_borrow [[BORROWED_T]] +// HECK: [[RETTUPLE0:%.*]] = tuple ([[BORROWED_T_EXT1]] : $@thick T.Type, [[RETVAL1]] : $@callee_guaranteed (@in_guaranteed T) -> @out T) +// HECK: [[RETTUPLE1:%.*]] = tuple ([[RETVAL0]] : $T, [[RETTUPLE0]] : $(@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T)) +// HECK: [[RETVAL:%.*]] = enum $Optional<(T, (@thick T.Type, @callee_guaranteed (@in_guaranteed T) -> @out T))>, #Optional.some!enumelt, [[RETTUPLE1]] : $(T, (@thick T.Type, @callee_guaranteed 
(@in_guaranteed T) -> @out T)) +// HECK: return [[RETVAL]] +// CHECK-LABEL: // end sil function '$s27opaque_values_silgen_vtable16StillOpaqueClassC24variantOptionalityTuples1xx_xm_xxcttx_xm_xxcttSg_tFAA0fG0CAdeFx_xm_xxctt_tFTV' From 636f3a7534603cc90b3ff33af6878e6aba447bd7 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sun, 6 Mar 2022 19:35:53 -0800 Subject: [PATCH 76/88] Add a test file for combined SILGen + AddressLowering --- test/SILOptimizer/opaque_values_Onone.swift | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 test/SILOptimizer/opaque_values_Onone.swift diff --git a/test/SILOptimizer/opaque_values_Onone.swift b/test/SILOptimizer/opaque_values_Onone.swift new file mode 100644 index 0000000000000..648805e09a13a --- /dev/null +++ b/test/SILOptimizer/opaque_values_Onone.swift @@ -0,0 +1,10 @@ +// RUN: %target-swift-frontend -enable-sil-opaque-values -parse-as-library -emit-sil -Onone %s | %FileCheck %s + +// CHECK-LABEL: sil hidden @$s19opaque_values_Onone16generic_identity1txx_tlF : $@convention(thin) (@in_guaranteed T) -> @out T { +// CHECK: bb0(%0 : $*T, %1 : $*T): +// CHECK: debug_value %1 : $*T, let, name "t", argno 1 +// CHECK: copy_addr %1 to [initialization] %0 : $*T +// CHECK-LABEL: } // end sil function '$s19opaque_values_Onone16generic_identity1txx_tlF' +func generic_identity(t: T) -> T { + return t +} From fd9ebb58e76a3c6ee5aac63d725ff6ea20275ecb Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Sun, 20 Mar 2022 00:08:03 -0700 Subject: [PATCH 77/88] [SIL-opaque] Add address lowering test case. For borrowing a projection. 
--- test/SILOptimizer/address_lowering.sil | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/test/SILOptimizer/address_lowering.sil b/test/SILOptimizer/address_lowering.sil index a12914463e6ab..798dcae3f5981 100644 --- a/test/SILOptimizer/address_lowering.sil +++ b/test/SILOptimizer/address_lowering.sil @@ -806,6 +806,28 @@ bb0(%0 : @owned $(AnyObject, T)): return %tuple : $(AnyObject, T) } +// CHECK-LABEL: sil [ossa] @f126_testDestructureAndBorrow : $@convention(method) (@in (SI, I)) -> (@out Element, @out I) { +// CHECK: bb0(%0 : $*Element, %1 : $*I, %2 : $*(SI, I)): +// CHECK: [[SI:%.*]] = tuple_element_addr %2 : $*(SI, I), 0 +// CHECK: [[I:%.*]] = tuple_element_addr %2 : $*(SI, I), 1 +// CHECK: [[LD:%.*]] = load [trivial] [[I]] : $*I +// CHECK: [[E:%.*]] = struct_element_addr [[SI]] : $*SI, #SI.element +// CHECK: copy_addr [[E]] to [initialization] %0 : $*Element +// CHECK: destroy_addr [[SI]] : $*SI +// CHECK: store [[LD]] to [trivial] %1 : $*I +// CHECK-LABEL: } // end sil function 'f126_testDestructureAndBorrow' +sil [ossa] @f126_testDestructureAndBorrow : $@convention(method) (@in (SI, I)) -> (@out Element, @out I) { +bb0(%0 : @owned $(SI, I)): + (%si, %i) = destructure_tuple %0 : $(SI, I) + %borrow = begin_borrow %si : $SI + %element = struct_extract %borrow : $SI, #SI.element + %copy = copy_value %element : $Element + end_borrow %borrow : $SI + destroy_value %si : $SI + %tuple = tuple(%copy : $Element, %i : $I) + return %tuple : $(Element, I) +} + // CHECK-LABEL: sil [ossa] @f130_testReleaseValue : $@convention(thin) (@in T) -> () { // CHECK: bb0(%0 : $*T): // CHECK: destroy_addr %0 : $*T From 05fb8c79302f3cfc99175800b5c97eb497650244 Mon Sep 17 00:00:00 2001 From: Andrew Trick Date: Tue, 22 Mar 2022 17:02:45 -0700 Subject: [PATCH 78/88] Remove CheckedCastValue from Swift compiler sources. 
--- SwiftCompilerSources/Sources/SIL/Instruction.swift | 10 ---------- SwiftCompilerSources/Sources/SIL/Registration.swift | 2 -- 2 files changed, 12 deletions(-) diff --git a/SwiftCompilerSources/Sources/SIL/Instruction.swift b/SwiftCompilerSources/Sources/SIL/Instruction.swift index a1cc3b265e92d..f3b6d56d3805b 100644 --- a/SwiftCompilerSources/Sources/SIL/Instruction.swift +++ b/SwiftCompilerSources/Sources/SIL/Instruction.swift @@ -408,12 +408,6 @@ class UnconditionalCheckedCastInst : SingleValueInstruction, UnaryInstruction { public override var mayTrap: Bool { true } } -final public -class UnconditionalCheckedCastValueInst : SingleValueInstruction, - UnaryInstruction { - public override var mayTrap: Bool { true } -} - final public class ConvertFunctionInst : SingleValueInstruction, UnaryInstruction {} @@ -624,7 +618,3 @@ final public class CheckedCastBranchInst : TermInst, UnaryInstruction { final public class CheckedCastAddrBranchInst : TermInst, UnaryInstruction { } - -final public class CheckedCastValueBranchInst : TermInst, UnaryInstruction { -} - diff --git a/SwiftCompilerSources/Sources/SIL/Registration.swift b/SwiftCompilerSources/Sources/SIL/Registration.swift index c61148cfb5f17..a07f2d0027b66 100644 --- a/SwiftCompilerSources/Sources/SIL/Registration.swift +++ b/SwiftCompilerSources/Sources/SIL/Registration.swift @@ -92,7 +92,6 @@ public func registerSILClasses() { register(RefElementAddrInst.self) register(RefTailAddrInst.self) register(UnconditionalCheckedCastInst.self) - register(UnconditionalCheckedCastValueInst.self) register(ConvertFunctionInst.self) register(ThinToThickFunctionInst.self) register(ObjCExistentialMetatypeToObjectInst.self) @@ -140,5 +139,4 @@ public func registerSILClasses() { register(AwaitAsyncContinuationInst.self) register(CheckedCastBranchInst.self) register(CheckedCastAddrBranchInst.self) - register(CheckedCastValueBranchInst.self) } From cb3818ea1672da1c224784af5637f05cb199543b Mon Sep 17 00:00:00 2001 From: John McCall 
Date: Tue, 22 Mar 2022 18:25:54 -0400 Subject: [PATCH 79/88] [NFC] Split the basic target-layout logic into its own file --- include/swift/ABI/Metadata.h | 233 ++++---------------------- include/swift/ABI/TargetLayout.h | 187 +++++++++++++++++++++ include/swift/Remote/MetadataReader.h | 9 +- 3 files changed, 221 insertions(+), 208 deletions(-) create mode 100644 include/swift/ABI/TargetLayout.h diff --git a/include/swift/ABI/Metadata.h b/include/swift/ABI/Metadata.h index 946f511cd9795..c2183683a6026 100644 --- a/include/swift/ABI/Metadata.h +++ b/include/swift/ABI/Metadata.h @@ -33,6 +33,7 @@ #include "swift/Runtime/Once.h" #include "swift/ABI/MetadataValues.h" #include "swift/ABI/System.h" +#include "swift/ABI/TargetLayout.h" #include "swift/ABI/TrailingObjects.h" #include "swift/Basic/Malloc.h" #include "swift/Basic/FlaggedPointer.h" @@ -66,146 +67,6 @@ template class TargetEnumDescriptor; template class TargetStructDescriptor; template struct TargetGenericMetadataPattern; -template -struct RuntimeTarget; - -template <> -struct RuntimeTarget<4> { - using StoredPointer = uint32_t; - // To avoid implicit conversions from StoredSignedPointer to StoredPointer. - using StoredSignedPointer = struct { - uint32_t SignedValue; - }; - using StoredSize = uint32_t; - using StoredPointerDifference = int32_t; - static constexpr size_t PointerSize = 4; -}; - -template <> -struct RuntimeTarget<8> { - using StoredPointer = uint64_t; - // To avoid implicit conversions from StoredSignedPointer to StoredPointer. - using StoredSignedPointer = struct { - uint64_t SignedValue; - }; - using StoredSize = uint64_t; - using StoredPointerDifference = int64_t; - static constexpr size_t PointerSize = 8; -}; - -namespace reflection { - class FieldDescriptor; -} - -/// In-process native runtime target. -/// -/// For interactions in the runtime, this should be the equivalent of working -/// with a plain old pointer type. 
-struct InProcess { - static constexpr size_t PointerSize = sizeof(uintptr_t); - using StoredPointer = uintptr_t; - using StoredSignedPointer = uintptr_t; - using StoredSize = size_t; - using StoredPointerDifference = ptrdiff_t; - -#if SWIFT_OBJC_INTEROP - static constexpr bool ObjCInterop = true; - template - using TargetAnyClassMetadata = TargetAnyClassMetadataObjCInterop; -#else - static constexpr bool ObjCInterop = false; - template - using TargetAnyClassMetadata = TargetAnyClassMetadata; -#endif - template - using TargetClassMetadata = TargetClassMetadata>; - - static_assert(sizeof(StoredSize) == sizeof(StoredPointerDifference), - "target uses differently-sized size_t and ptrdiff_t"); - - template - using Pointer = T*; - - template - using SignedPointer = T; - - template - using FarRelativeDirectPointer = FarRelativeDirectPointer; - - template - using RelativeIndirectablePointer = - RelativeIndirectablePointer; - - template - using RelativeDirectPointer = RelativeDirectPointer; -}; - -/// Represents a pointer in another address space. -/// -/// This type should not have * or -> operators -- you must as a memory reader -/// to read the data at the stored address on your behalf. 
-template -struct ExternalPointer { - using StoredPointer = typename Runtime::StoredPointer; - StoredPointer PointerValue; -}; - -template struct WithObjCInterop { - using StoredPointer = typename Runtime::StoredPointer; - using StoredSignedPointer = typename Runtime::StoredSignedPointer; - using StoredSize = typename Runtime::StoredSize; - using StoredPointerDifference = typename Runtime::StoredPointerDifference; - static constexpr size_t PointerSize = Runtime::PointerSize; - static constexpr bool ObjCInterop = true; - template - using TargetAnyClassMetadata = TargetAnyClassMetadataObjCInterop; -}; - -template struct NoObjCInterop { - using StoredPointer = typename Runtime::StoredPointer; - using StoredSignedPointer = typename Runtime::StoredSignedPointer; - using StoredSize = typename Runtime::StoredSize; - using StoredPointerDifference = typename Runtime::StoredPointerDifference; - static constexpr size_t PointerSize = Runtime::PointerSize; - static constexpr bool ObjCInterop = false; - template - using TargetAnyClassMetadata = TargetAnyClassMetadata; -}; - -/// An external process's runtime target, which may be a different architecture, -/// and may or may not have Objective-C interoperability. 
-template -struct External { - using StoredPointer = typename Runtime::StoredPointer; - using StoredSignedPointer = typename Runtime::StoredSignedPointer; - using StoredSize = typename Runtime::StoredSize; - using StoredPointerDifference = typename Runtime::StoredPointerDifference; - template - using TargetAnyClassMetadata = - typename Runtime::template TargetAnyClassMetadata; - template - using TargetClassMetadata = TargetClassMetadata>; - - static constexpr size_t PointerSize = Runtime::PointerSize; - static constexpr bool ObjCInterop = Runtime::ObjCInterop; - const StoredPointer PointerValue; - - template - using Pointer = StoredPointer; - - template - using SignedPointer = StoredSignedPointer; - - template - using FarRelativeDirectPointer = StoredPointer; - - template - using RelativeIndirectablePointer = int32_t; - - template - using RelativeDirectPointer = int32_t; -}; - /// Template for branching on native pointer types versus external ones template class Pointee> using TargetMetadataPointer @@ -214,37 +75,28 @@ using TargetMetadataPointer template class Pointee> using ConstTargetMetadataPointer = typename Runtime::template Pointer>; - -template -using TargetPointer = typename Runtime::template Pointer; - -template -using TargetSignedPointer = typename Runtime::template SignedPointer; - -template -using ConstTargetPointer = typename Runtime::template Pointer; - - -template class Pointee, - bool Nullable = true> -using ConstTargetFarRelativeDirectPointer - = typename Runtime::template FarRelativeDirectPointer, - Nullable>; - -template -using TargetRelativeDirectPointer - = typename Runtime::template RelativeDirectPointer; - -template -using TargetRelativeIndirectablePointer - = typename Runtime::template RelativeIndirectablePointer; struct HeapObject; class WeakReference; struct UnownedReference; -template struct TargetMetadata; -using Metadata = TargetMetadata; +template +struct TargetAnyClassMetadataTypeImpl; +template +struct TargetAnyClassMetadataTypeImpl 
{ + using type = TargetAnyClassMetadataObjCInterop; +}; +template +struct TargetAnyClassMetadataTypeImpl { + using type = TargetAnyClassMetadata; +}; +template +using TargetAnyClassMetadataType = + typename TargetAnyClassMetadataTypeImpl::type; + +template +using TargetClassMetadataType = + TargetClassMetadata>; /// The result of requesting type metadata. Generally the return value of /// a function. @@ -730,23 +582,13 @@ struct TargetMetadata { getTypeContextDescriptor() const { switch (getKind()) { case MetadataKind::Class: { - if (Runtime::ObjCInterop) { - const auto cls = static_cast> *>(this); - if (!cls->isTypeMetadata()) - return nullptr; - if (cls->isArtificialSubclass()) - return nullptr; - return cls->getDescription(); - } else { - const auto cls = static_cast> *>(this); - if (!cls->isTypeMetadata()) - return nullptr; - if (cls->isArtificialSubclass()) - return nullptr; - return cls->getDescription(); - } + const auto cls = + static_cast *>(this); + if (!cls->isTypeMetadata()) + return nullptr; + if (cls->isArtificialSubclass()) + return nullptr; + return cls->getDescription(); } case MetadataKind::Struct: case MetadataKind::Enum: @@ -763,7 +605,7 @@ struct TargetMetadata { /// Get the class object for this type if it has one, or return null if the /// type is not a class (or not a class with a class object). - const typename Runtime::template TargetClassMetadata * + const TargetClassMetadataType * getClassObject() const; /// Retrieve the generic arguments of this type, if it has any. @@ -1008,8 +850,7 @@ struct TargetClassMetadataBounds : TargetMetadataBounds { using TargetMetadataBounds::NegativeSizeInWords; using TargetMetadataBounds::PositiveSizeInWords; - using TargetClassMetadata = - typename Runtime::template TargetClassMetadata; + using TargetClassMetadata = TargetClassMetadataType; /// The offset from the address point of the metadata to the immediate /// members. 
@@ -1022,13 +863,10 @@ struct TargetClassMetadataBounds : TargetMetadataBounds { : TargetMetadataBounds{negativeSizeInWords, positiveSizeInWords}, ImmediateMembersOffset(immediateMembersOffset) {} - template - using TargetClassMetadataT = typename Runtime::template TargetClassMetadata; - /// Return the basic bounds of all Swift class metadata. /// The immediate members offset will not be meaningful. static constexpr TargetClassMetadataBounds forSwiftRootClass() { - using Metadata = FullMetadata>; + using Metadata = FullMetadata>; return forAddressPointAndSize(sizeof(typename Metadata::HeaderType), sizeof(Metadata)); } @@ -1071,8 +909,7 @@ template struct TargetAnyClassMetadata : public TargetHeapMetadata { using StoredPointer = typename Runtime::StoredPointer; using StoredSize = typename Runtime::StoredSize; - using TargetClassMetadata = - typename Runtime::template TargetClassMetadata; + using TargetClassMetadata = TargetClassMetadataType; protected: constexpr TargetAnyClassMetadata( @@ -1109,9 +946,7 @@ struct TargetAnyClassMetadataObjCInterop : public TargetAnyClassMetadata { using StoredPointer = typename Runtime::StoredPointer; using StoredSize = typename Runtime::StoredSize; - - using TargetClassMetadataObjCInterop = - swift::TargetClassMetadata>; + using TargetClassMetadataObjCInterop = TargetClassMetadataType; constexpr TargetAnyClassMetadataObjCInterop( TargetAnyClassMetadataObjCInterop *isa, @@ -1156,11 +991,7 @@ struct TargetAnyClassMetadataObjCInterop } }; -#if SWIFT_OBJC_INTEROP -using AnyClassMetadata = TargetAnyClassMetadataObjCInterop; -#else -using AnyClassMetadata = TargetAnyClassMetadata; -#endif +using AnyClassMetadata = TargetAnyClassMetadataType; using ClassIVarDestroyer = SWIFT_CC(swift) void(SWIFT_CONTEXT HeapObject *); @@ -2594,7 +2425,7 @@ class RelativeTargetProtocolDescriptorPointer { template struct TargetTypeReference { template - using TargetClassMetadata = typename T::template TargetClassMetadata; + using TargetClassMetadata = 
TargetClassMetadataType; union { /// A direct reference to a TypeContextDescriptor or ProtocolDescriptor. diff --git a/include/swift/ABI/TargetLayout.h b/include/swift/ABI/TargetLayout.h new file mode 100644 index 0000000000000..404428075834f --- /dev/null +++ b/include/swift/ABI/TargetLayout.h @@ -0,0 +1,187 @@ +//===--- TargetLayout.h - Target-parameterized layout support ---*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// A lot of Swift's runtime structures need to be parsed by code +// that may not be running on the same target as the structures were +// emitted for. To facilitate this, we do two things in the definition +// of those structures: +// +// First, the structures are templated over the target runtime. +// +// Second, the layout of the structure is defined using types that +// are either fixed size for all targets or dependent on the layout +// of the structure. +// +// This file defines common templates, types, and typedefs for doing this +// layout. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_ABI_TARGETLAYOUT_H +#define SWIFT_ABI_TARGETLAYOUT_H + +#include "swift/Runtime/Config.h" +#include "swift/Basic/RelativePointer.h" + +namespace swift { + +template +struct RuntimeTarget; + +template <> +struct RuntimeTarget<4> { + using StoredPointer = uint32_t; + // To avoid implicit conversions from StoredSignedPointer to StoredPointer. 
+ using StoredSignedPointer = struct { + uint32_t SignedValue; + }; + using StoredSize = uint32_t; + using StoredPointerDifference = int32_t; + static constexpr size_t PointerSize = 4; +}; + +template <> +struct RuntimeTarget<8> { + using StoredPointer = uint64_t; + // To avoid implicit conversions from StoredSignedPointer to StoredPointer. + using StoredSignedPointer = struct { + uint64_t SignedValue; + }; + using StoredSize = uint64_t; + using StoredPointerDifference = int64_t; + static constexpr size_t PointerSize = 8; +}; + +namespace reflection { + class FieldDescriptor; +} + +/// In-process native runtime target. +/// +/// For interactions in the runtime, this should be the equivalent of working +/// with a plain old pointer type. +struct InProcess { + static constexpr size_t PointerSize = sizeof(uintptr_t); + using StoredPointer = uintptr_t; + using StoredSignedPointer = uintptr_t; + using StoredSize = size_t; + using StoredPointerDifference = ptrdiff_t; + +#if SWIFT_OBJC_INTEROP + static constexpr bool ObjCInterop = true; +#else + static constexpr bool ObjCInterop = false; +#endif + + static_assert(sizeof(StoredSize) == sizeof(StoredPointerDifference), + "target uses differently-sized size_t and ptrdiff_t"); + + template + using Pointer = T*; + + template + using SignedPointer = T; + + template + using FarRelativeDirectPointer = FarRelativeDirectPointer; + + template + using RelativeIndirectablePointer = + RelativeIndirectablePointer; + + template + using RelativeDirectPointer = RelativeDirectPointer; +}; + +/// Represents a pointer in another address space. +/// +/// This type should not have * or -> operators -- you must ask a memory reader +/// to read the data at the stored address on your behalf. 
+template +struct ExternalPointer { + using StoredPointer = typename Runtime::StoredPointer; + StoredPointer PointerValue; +}; + +template struct WithObjCInterop { + using StoredPointer = typename Runtime::StoredPointer; + using StoredSignedPointer = typename Runtime::StoredSignedPointer; + using StoredSize = typename Runtime::StoredSize; + using StoredPointerDifference = typename Runtime::StoredPointerDifference; + static constexpr size_t PointerSize = Runtime::PointerSize; + static constexpr bool ObjCInterop = true; +}; + +template struct NoObjCInterop { + using StoredPointer = typename Runtime::StoredPointer; + using StoredSignedPointer = typename Runtime::StoredSignedPointer; + using StoredSize = typename Runtime::StoredSize; + using StoredPointerDifference = typename Runtime::StoredPointerDifference; + static constexpr size_t PointerSize = Runtime::PointerSize; + static constexpr bool ObjCInterop = false; +}; + +/// An external process's runtime target, which may be a different architecture, +/// and may or may not have Objective-C interoperability. 
+template +struct External { + using StoredPointer = typename Runtime::StoredPointer; + using StoredSignedPointer = typename Runtime::StoredSignedPointer; + using StoredSize = typename Runtime::StoredSize; + using StoredPointerDifference = typename Runtime::StoredPointerDifference; + + static constexpr size_t PointerSize = Runtime::PointerSize; + static constexpr bool ObjCInterop = Runtime::ObjCInterop; + const StoredPointer PointerValue; + + template + using Pointer = StoredPointer; + + template + using SignedPointer = StoredSignedPointer; + + template + using FarRelativeDirectPointer = StoredPointer; + + template + using RelativeIndirectablePointer = int32_t; + + template + using RelativeDirectPointer = int32_t; +}; + +template +using TargetPointer = typename Runtime::template Pointer; + +template +using TargetSignedPointer = typename Runtime::template SignedPointer; + +template +using ConstTargetPointer = typename Runtime::template Pointer; + +template class Pointee, + bool Nullable = true> +using ConstTargetFarRelativeDirectPointer + = typename Runtime::template FarRelativeDirectPointer, + Nullable>; + +template +using TargetRelativeDirectPointer + = typename Runtime::template RelativeDirectPointer; + +template +using TargetRelativeIndirectablePointer + = typename Runtime::template RelativeIndirectablePointer; + +} // end namespace swift + +#endif diff --git a/include/swift/Remote/MetadataReader.h b/include/swift/Remote/MetadataReader.h index 265cf2d67b9ec..63bbf9c0c8a6c 100644 --- a/include/swift/Remote/MetadataReader.h +++ b/include/swift/Remote/MetadataReader.h @@ -168,8 +168,7 @@ class MetadataReader { using StoredPointer = typename Runtime::StoredPointer; using StoredSignedPointer = typename Runtime::StoredSignedPointer; using StoredSize = typename Runtime::StoredSize; - using TargetClassMetadata = - typename Runtime::template TargetClassMetadata; + using TargetClassMetadata = TargetClassMetadataType; private: /// The maximum number of bytes to read when 
reading metadata. Anything larger @@ -1663,10 +1662,6 @@ class MetadataReader { return Reader->readString(RemoteAddress(namePtr), className); } - template - using TargetClassMetadataT = - typename Runtime::template TargetClassMetadata; - MetadataRef readMetadata(StoredPointer address) { auto cached = MetadataCache.find(address); if (cached != MetadataCache.end()) @@ -1681,7 +1676,7 @@ class MetadataReader { switch (getEnumeratedMetadataKind(KindValue)) { case MetadataKind::Class: - return _readMetadata(address); + return _readMetadata(address); case MetadataKind::Enum: return _readMetadata(address); From e2332a34bc8fda5bcc34de526440650dbd9dc6c7 Mon Sep 17 00:00:00 2001 From: John McCall Date: Tue, 22 Mar 2022 18:44:28 -0400 Subject: [PATCH 80/88] [NFC] Move ValueWitnessTable layout into its own file I've also target-ified EnumValueWitnessTable while I was at it. --- include/swift/ABI/Metadata.h | 200 +--------------- include/swift/ABI/ValueWitnessTable.h | 324 ++++++++++++++++++++++++++ include/swift/Runtime/Metadata.h | 88 ------- 3 files changed, 325 insertions(+), 287 deletions(-) create mode 100644 include/swift/ABI/ValueWitnessTable.h diff --git a/include/swift/ABI/Metadata.h b/include/swift/ABI/Metadata.h index c2183683a6026..a8fff1e36fc94 100644 --- a/include/swift/ABI/Metadata.h +++ b/include/swift/ABI/Metadata.h @@ -18,10 +18,6 @@ #define SWIFT_ABI_METADATA_H #include -#include -#include -#include -#include #include #include #include @@ -35,6 +31,7 @@ #include "swift/ABI/System.h" #include "swift/ABI/TargetLayout.h" #include "swift/ABI/TrailingObjects.h" +#include "swift/ABI/ValueWitnessTable.h" #include "swift/Basic/Malloc.h" #include "swift/Basic/FlaggedPointer.h" #include "swift/Basic/RelativePointer.h" @@ -147,201 +144,6 @@ struct MetadataDependency { template struct TargetProtocolConformanceDescriptor; -/// Storage for an arbitrary value. In C/C++ terms, this is an -/// 'object', because it is rooted in memory. 
-/// -/// The context dictates what type is actually stored in this object, -/// and so this type is intentionally incomplete. -/// -/// An object can be in one of two states: -/// - An uninitialized object has a completely unspecified state. -/// - An initialized object holds a valid value of the type. -struct OpaqueValue; - -/// A fixed-size buffer for local values. It is capable of owning -/// (possibly in side-allocated memory) the storage necessary -/// to hold a value of an arbitrary type. Because it is fixed-size, -/// it can be allocated in places that must be agnostic to the -/// actual type: for example, within objects of existential type, -/// or for local variables in generic functions. -/// -/// The context dictates its type, which ultimately means providing -/// access to a value witness table by which the value can be -/// accessed and manipulated. -/// -/// A buffer can directly store three pointers and is pointer-aligned. -/// Three pointers is a sweet spot for Swift, because it means we can -/// store a structure containing a pointer, a size, and an owning -/// object, which is a common pattern in code due to ARC. In a GC -/// environment, this could be reduced to two pointers without much loss. -/// -/// A buffer can be in one of three states: -/// - An unallocated buffer has a completely unspecified state. -/// - An allocated buffer has been initialized so that it -/// owns uninitialized value storage for the stored type. -/// - An initialized buffer is an allocated buffer whose value -/// storage has been initialized. -template -struct TargetValueBuffer { - TargetPointer PrivateData[NumWords_ValueBuffer]; -}; -using ValueBuffer = TargetValueBuffer; - -/// Can a value with the given size and alignment be allocated inline? 
-constexpr inline bool canBeInline(bool isBitwiseTakable, size_t size, - size_t alignment) { - return isBitwiseTakable && size <= sizeof(ValueBuffer) && - alignment <= alignof(ValueBuffer); -} - -template -constexpr inline bool canBeInline(bool isBitwiseTakable) { - return canBeInline(isBitwiseTakable, sizeof(T), alignof(T)); -} - -template struct TargetValueWitnessTable; -using ValueWitnessTable = TargetValueWitnessTable; - -template class TargetValueWitnessTypes; -using ValueWitnessTypes = TargetValueWitnessTypes; - -template -class TargetValueWitnessTypes { -public: - using StoredPointer = typename Runtime::StoredPointer; - -// Note that, for now, we aren't strict about 'const'. -#define WANT_ALL_VALUE_WITNESSES -#define DATA_VALUE_WITNESS(lowerId, upperId, type) -#define FUNCTION_VALUE_WITNESS(lowerId, upperId, returnType, paramTypes) \ - typedef returnType (*lowerId ## Unsigned) paramTypes; \ - typedef TargetSignedPointer lowerId; -#define MUTABLE_VALUE_TYPE TargetPointer -#define IMMUTABLE_VALUE_TYPE ConstTargetPointer -#define MUTABLE_BUFFER_TYPE TargetPointer -#define IMMUTABLE_BUFFER_TYPE ConstTargetPointer -#define TYPE_TYPE ConstTargetPointer -#define SIZE_TYPE StoredSize -#define INT_TYPE int -#define UINT_TYPE unsigned -#define VOID_TYPE void -#include "swift/ABI/ValueWitness.def" - - // Handle the data witnesses explicitly so we can use more specific - // types for the flags enums. - typedef size_t size; - typedef size_t stride; - typedef ValueWitnessFlags flags; - typedef uint32_t extraInhabitantCount; -}; - -struct TypeLayout; - -/// A value-witness table. A value witness table is built around -/// the requirements of some specific type. The information in -/// a value-witness table is intended to be sufficient to lay out -/// and manipulate values of an arbitrary type. -template struct TargetValueWitnessTable { - // For the meaning of all of these witnesses, consult the comments - // on their associated typedefs, above. 
- -#define WANT_ONLY_REQUIRED_VALUE_WITNESSES -#define VALUE_WITNESS(LOWER_ID, UPPER_ID) \ - typename TargetValueWitnessTypes::LOWER_ID LOWER_ID; -#define FUNCTION_VALUE_WITNESS(LOWER_ID, UPPER_ID, RET, PARAMS) \ - typename TargetValueWitnessTypes::LOWER_ID LOWER_ID; - -#include "swift/ABI/ValueWitness.def" - - using StoredSize = typename Runtime::StoredSize; - - /// Is the external type layout of this type incomplete? - bool isIncomplete() const { - return flags.isIncomplete(); - } - - /// Would values of a type with the given layout requirements be - /// allocated inline? - static bool isValueInline(bool isBitwiseTakable, StoredSize size, - StoredSize alignment) { - return (isBitwiseTakable && size <= sizeof(TargetValueBuffer) && - alignment <= alignof(TargetValueBuffer)); - } - - /// Are values of this type allocated inline? - bool isValueInline() const { - return flags.isInlineStorage(); - } - - /// Is this type POD? - bool isPOD() const { - return flags.isPOD(); - } - - /// Is this type bitwise-takable? - bool isBitwiseTakable() const { - return flags.isBitwiseTakable(); - } - - /// Return the size of this type. Unlike in C, this has not been - /// padded up to the alignment; that value is maintained as - /// 'stride'. - StoredSize getSize() const { - return size; - } - - /// Return the stride of this type. This is the size rounded up to - /// be a multiple of the alignment. - StoredSize getStride() const { - return stride; - } - - /// Return the alignment required by this type, in bytes. - StoredSize getAlignment() const { - return flags.getAlignment(); - } - - /// The alignment mask of this type. An offset may be rounded up to - /// the required alignment by adding this mask and masking by its - /// bit-negation. - /// - /// For example, if the type needs to be 8-byte aligned, the value - /// of this witness is 0x7. 
- StoredSize getAlignmentMask() const { - return flags.getAlignmentMask(); - } - - /// The number of extra inhabitants, that is, bit patterns that do not form - /// valid values of the type, in this type's binary representation. - unsigned getNumExtraInhabitants() const { - return extraInhabitantCount; - } - - /// Assert that this value witness table is an enum value witness table - /// and return it as such. - /// - /// This has an awful name because it's supposed to be internal to - /// this file. Code outside this file should use LLVM's cast/dyn_cast. - /// We don't want to use those here because we need to avoid accidentally - /// introducing ABI dependencies on LLVM structures. - const struct EnumValueWitnessTable *_asEVWT() const; - - /// Get the type layout record within this value witness table. - const TypeLayout *getTypeLayout() const { - return reinterpret_cast(&size); - } - - /// Check whether this metadata is complete. - bool checkIsComplete() const; - - /// "Publish" the layout of this type to other threads. All other stores - /// to the value witness table (including its extended header) should have - /// happened before this is called. - void publishLayout(const TypeLayout &layout); -}; - /// The header before a metadata object which appears on all type /// metadata. Note that heap metadata are not necessarily type /// metadata, even for objects of a heap type: for example, objects of diff --git a/include/swift/ABI/ValueWitnessTable.h b/include/swift/ABI/ValueWitnessTable.h new file mode 100644 index 0000000000000..ccc14c0bfb84e --- /dev/null +++ b/include/swift/ABI/ValueWitnessTable.h @@ -0,0 +1,324 @@ +//===--- ValueWitnessTable.h - Value witness table ABI ----------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2017 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// This file defines ValueWitnessTable, a structure attached to type +// metadata objects (class Metadata) which tells generic code (whether +// generated or in the Swift runtime) how to lay out, copy, move, and +// destroy values of the type. +// +// This class uses the target-layout infrastructure; ValueWitnessTable is +// a typedef for the in-process application of the layout. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_ABI_VALUEWITNESSTABLE_H +#define SWIFT_ABI_VALUEWITNESSTABLE_H + +#include "swift/Runtime/Config.h" +#include "swift/ABI/TargetLayout.h" +#include "swift/ABI/MetadataValues.h" + +namespace swift { + +struct TypeLayout; +template struct TargetEnumValueWitnessTable; +template struct TargetMetadata; +using Metadata = TargetMetadata; + +/// Storage for an arbitrary value. In C/C++ terms, this is an +/// 'object', because it is rooted in memory. +/// +/// The context dictates what type is actually stored in this object, +/// and so this type is intentionally incomplete. +/// +/// An object can be in one of two states: +/// - An uninitialized object has a completely unspecified state. +/// - An initialized object holds a valid value of the type. +struct OpaqueValue; + +/// A fixed-size buffer for local values. It is capable of owning +/// (possibly in side-allocated memory) the storage necessary +/// to hold a value of an arbitrary type. Because it is fixed-size, +/// it can be allocated in places that must be agnostic to the +/// actual type: for example, within objects of existential type, +/// or for local variables in generic functions. 
+/// +/// The context dictates its type, which ultimately means providing +/// access to a value witness table by which the value can be +/// accessed and manipulated. +/// +/// A buffer can directly store three pointers and is pointer-aligned. +/// Three pointers is a sweet spot for Swift, because it means we can +/// store a structure containing a pointer, a size, and an owning +/// object, which is a common pattern in code due to ARC. In a GC +/// environment, this could be reduced to two pointers without much loss. +/// +/// A buffer can be in one of three states: +/// - An unallocated buffer has a completely unspecified state. +/// - An allocated buffer has been initialized so that it +/// owns uninitialized value storage for the stored type. +/// - An initialized buffer is an allocated buffer whose value +/// storage has been initialized. +template +struct TargetValueBuffer { + TargetPointer PrivateData[NumWords_ValueBuffer]; +}; +using ValueBuffer = TargetValueBuffer; + +/// Can a value with the given size and alignment be allocated inline? +constexpr inline bool canBeInline(bool isBitwiseTakable, size_t size, + size_t alignment) { + return isBitwiseTakable && size <= sizeof(ValueBuffer) && + alignment <= alignof(ValueBuffer); +} + +template +constexpr inline bool canBeInline(bool isBitwiseTakable) { + return canBeInline(isBitwiseTakable, sizeof(T), alignof(T)); +} + +template struct TargetValueWitnessTable; +using ValueWitnessTable = TargetValueWitnessTable; + +template class TargetValueWitnessTypes; +using ValueWitnessTypes = TargetValueWitnessTypes; + +template +class TargetValueWitnessTypes { +public: + using StoredPointer = typename Runtime::StoredPointer; + +// Note that, for now, we aren't strict about 'const'. 
+#define WANT_ALL_VALUE_WITNESSES +#define DATA_VALUE_WITNESS(lowerId, upperId, type) +#define FUNCTION_VALUE_WITNESS(lowerId, upperId, returnType, paramTypes) \ + typedef returnType (*lowerId ## Unsigned) paramTypes; \ + typedef TargetSignedPointer lowerId; +#define MUTABLE_VALUE_TYPE TargetPointer +#define IMMUTABLE_VALUE_TYPE ConstTargetPointer +#define MUTABLE_BUFFER_TYPE TargetPointer +#define IMMUTABLE_BUFFER_TYPE ConstTargetPointer +#define TYPE_TYPE ConstTargetPointer +#define SIZE_TYPE StoredSize +#define INT_TYPE int +#define UINT_TYPE unsigned +#define VOID_TYPE void +#include "swift/ABI/ValueWitness.def" + + // Handle the data witnesses explicitly so we can use more specific + // types for the flags enums. + typedef size_t size; + typedef size_t stride; + typedef ValueWitnessFlags flags; + typedef uint32_t extraInhabitantCount; +}; + +/// A value-witness table. A value witness table is built around +/// the requirements of some specific type. The information in +/// a value-witness table is intended to be sufficient to lay out +/// and manipulate values of an arbitrary type. +template struct TargetValueWitnessTable { + // For the meaning of all of these witnesses, consult the comments + // on their associated typedefs, above. + +#define WANT_ONLY_REQUIRED_VALUE_WITNESSES +#define VALUE_WITNESS(LOWER_ID, UPPER_ID) \ + typename TargetValueWitnessTypes::LOWER_ID LOWER_ID; +#define FUNCTION_VALUE_WITNESS(LOWER_ID, UPPER_ID, RET, PARAMS) \ + typename TargetValueWitnessTypes::LOWER_ID LOWER_ID; + +#include "swift/ABI/ValueWitness.def" + + using StoredSize = typename Runtime::StoredSize; + + /// Is the external type layout of this type incomplete? + bool isIncomplete() const { + return flags.isIncomplete(); + } + + /// Would values of a type with the given layout requirements be + /// allocated inline? 
+ static bool isValueInline(bool isBitwiseTakable, StoredSize size, + StoredSize alignment) { + return (isBitwiseTakable && size <= sizeof(TargetValueBuffer) && + alignment <= alignof(TargetValueBuffer)); + } + + /// Are values of this type allocated inline? + bool isValueInline() const { + return flags.isInlineStorage(); + } + + /// Is this type POD? + bool isPOD() const { + return flags.isPOD(); + } + + /// Is this type bitwise-takable? + bool isBitwiseTakable() const { + return flags.isBitwiseTakable(); + } + + /// Return the size of this type. Unlike in C, this has not been + /// padded up to the alignment; that value is maintained as + /// 'stride'. + StoredSize getSize() const { + return size; + } + + /// Return the stride of this type. This is the size rounded up to + /// be a multiple of the alignment. + StoredSize getStride() const { + return stride; + } + + /// Return the alignment required by this type, in bytes. + StoredSize getAlignment() const { + return flags.getAlignment(); + } + + /// The alignment mask of this type. An offset may be rounded up to + /// the required alignment by adding this mask and masking by its + /// bit-negation. + /// + /// For example, if the type needs to be 8-byte aligned, the value + /// of this witness is 0x7. + StoredSize getAlignmentMask() const { + return flags.getAlignmentMask(); + } + + /// The number of extra inhabitants, that is, bit patterns that do not form + /// valid values of the type, in this type's binary representation. + unsigned getNumExtraInhabitants() const { + return extraInhabitantCount; + } + + /// Assert that this value witness table is an enum value witness table + /// and return it as such. + /// + /// This has an awful name because it's supposed to be internal to + /// this file. Code outside this file should use LLVM's cast/dyn_cast. + /// We don't want to use those here because we need to avoid accidentally + /// introducing ABI dependencies on LLVM structures. 
+ const TargetEnumValueWitnessTable *_asEVWT() const; + + /// Get the type layout record within this value witness table. + const TypeLayout *getTypeLayout() const { + return reinterpret_cast(&size); + } + + /// Check whether this metadata is complete. + bool checkIsComplete() const; + + /// "Publish" the layout of this type to other threads. All other stores + /// to the value witness table (including its extended header) should have + /// happened before this is called. + void publishLayout(const TypeLayout &layout); +}; + +/// A value-witness table with enum entry points. +/// These entry points are available only if the HasEnumWitnesses flag bit is +/// set in the 'flags' field. +template +struct TargetEnumValueWitnessTable : TargetValueWitnessTable { +#define WANT_ONLY_ENUM_VALUE_WITNESSES +#define VALUE_WITNESS(LOWER_ID, UPPER_ID) \ + ValueWitnessTypes::LOWER_ID LOWER_ID; +#define FUNCTION_VALUE_WITNESS(LOWER_ID, UPPER_ID, RET, PARAMS) \ + ValueWitnessTypes::LOWER_ID LOWER_ID; + +#include "swift/ABI/ValueWitness.def" + + constexpr TargetEnumValueWitnessTable() + : TargetValueWitnessTable{}, + getEnumTag(nullptr), + destructiveProjectEnumData(nullptr), + destructiveInjectEnumTag(nullptr) {} + constexpr TargetEnumValueWitnessTable( + const TargetValueWitnessTable &base, + ValueWitnessTypes::getEnumTagUnsigned getEnumTag, + ValueWitnessTypes::destructiveProjectEnumDataUnsigned + destructiveProjectEnumData, + ValueWitnessTypes::destructiveInjectEnumTagUnsigned + destructiveInjectEnumTag) + : TargetValueWitnessTable(base), + getEnumTag(getEnumTag), + destructiveProjectEnumData(destructiveProjectEnumData), + destructiveInjectEnumTag(destructiveInjectEnumTag) {} + + static bool classof(const TargetValueWitnessTable *table) { + return table->flags.hasEnumWitnesses(); + } +}; +using EnumValueWitnessTable = + TargetEnumValueWitnessTable; + +template +inline const TargetEnumValueWitnessTable * +TargetValueWitnessTable::_asEVWT() const { + 
assert(TargetEnumValueWitnessTable::classof(this)); + return static_cast *>(this); +} + +/// A type layout record. This is the subset of the value witness table that is +/// necessary to perform dependent layout of generic value types. It excludes +/// the value witness functions and includes only the size, alignment, +/// extra inhabitants, and miscellaneous flags about the type. +struct TypeLayout { + ValueWitnessTypes::size size; + ValueWitnessTypes::stride stride; + ValueWitnessTypes::flags flags; + ValueWitnessTypes::extraInhabitantCount extraInhabitantCount; + +private: + void _static_assert_layout(); +public: + TypeLayout() = default; + constexpr TypeLayout(ValueWitnessTypes::size size, + ValueWitnessTypes::stride stride, + ValueWitnessTypes::flags flags, + ValueWitnessTypes::extraInhabitantCount xiCount) + : size(size), stride(stride), flags(flags), extraInhabitantCount(xiCount) {} + + const TypeLayout *getTypeLayout() const { return this; } + + /// The number of extra inhabitants, that is, bit patterns that do not form + /// valid values of the type, in this type's binary representation. 
+ unsigned getNumExtraInhabitants() const { + return extraInhabitantCount; + } + + bool hasExtraInhabitants() const { + return extraInhabitantCount != 0; + } +}; + +inline void TypeLayout::_static_assert_layout() { + #define CHECK_TYPE_LAYOUT_OFFSET(FIELD) \ + static_assert(offsetof(ValueWitnessTable, FIELD) \ + - offsetof(ValueWitnessTable, size) \ + == offsetof(TypeLayout, FIELD), \ + "layout of " #FIELD " in TypeLayout doesn't match " \ + "value witness table") + CHECK_TYPE_LAYOUT_OFFSET(size); + CHECK_TYPE_LAYOUT_OFFSET(flags); + CHECK_TYPE_LAYOUT_OFFSET(extraInhabitantCount); + CHECK_TYPE_LAYOUT_OFFSET(stride); + + #undef CHECK_TYPE_LAYOUT_OFFSET +} + +} // end namespace swift + +#endif diff --git a/include/swift/Runtime/Metadata.h b/include/swift/Runtime/Metadata.h index 18f54d5f2c35e..b6007d71ac81a 100644 --- a/include/swift/Runtime/Metadata.h +++ b/include/swift/Runtime/Metadata.h @@ -106,88 +106,6 @@ SWIFT_RUNTIME_EXPORT OpaqueValue *swift_copyPOD(OpaqueValue *dest, OpaqueValue *src, const Metadata *self); - -/// A value-witness table with enum entry points. -/// These entry points are available only if the HasEnumWitnesses flag bit is -/// set in the 'flags' field. 
-struct EnumValueWitnessTable : ValueWitnessTable { -#define WANT_ONLY_ENUM_VALUE_WITNESSES -#define VALUE_WITNESS(LOWER_ID, UPPER_ID) \ - ValueWitnessTypes::LOWER_ID LOWER_ID; -#define FUNCTION_VALUE_WITNESS(LOWER_ID, UPPER_ID, RET, PARAMS) \ - ValueWitnessTypes::LOWER_ID LOWER_ID; - -#include "swift/ABI/ValueWitness.def" - - constexpr EnumValueWitnessTable() - : ValueWitnessTable{}, - getEnumTag(nullptr), - destructiveProjectEnumData(nullptr), - destructiveInjectEnumTag(nullptr) {} - constexpr EnumValueWitnessTable( - const ValueWitnessTable &base, - ValueWitnessTypes::getEnumTagUnsigned getEnumTag, - ValueWitnessTypes::destructiveProjectEnumDataUnsigned - destructiveProjectEnumData, - ValueWitnessTypes::destructiveInjectEnumTagUnsigned - destructiveInjectEnumTag) - : ValueWitnessTable(base), - getEnumTag(getEnumTag), - destructiveProjectEnumData(destructiveProjectEnumData), - destructiveInjectEnumTag(destructiveInjectEnumTag) {} - - static bool classof(const ValueWitnessTable *table) { - return table->flags.hasEnumWitnesses(); - } -}; - -/// A type layout record. This is the subset of the value witness table that is -/// necessary to perform dependent layout of generic value types. It excludes -/// the value witness functions and includes only the size, alignment, -/// extra inhabitants, and miscellaneous flags about the type. 
-struct TypeLayout { - ValueWitnessTypes::size size; - ValueWitnessTypes::stride stride; - ValueWitnessTypes::flags flags; - ValueWitnessTypes::extraInhabitantCount extraInhabitantCount; - -private: - void _static_assert_layout(); -public: - TypeLayout() = default; - constexpr TypeLayout(ValueWitnessTypes::size size, - ValueWitnessTypes::stride stride, - ValueWitnessTypes::flags flags, - ValueWitnessTypes::extraInhabitantCount xiCount) - : size(size), stride(stride), flags(flags), extraInhabitantCount(xiCount) {} - - const TypeLayout *getTypeLayout() const { return this; } - - /// The number of extra inhabitants, that is, bit patterns that do not form - /// valid values of the type, in this type's binary representation. - unsigned getNumExtraInhabitants() const { - return extraInhabitantCount; - } - - bool hasExtraInhabitants() const { - return extraInhabitantCount != 0; - } -}; - -inline void TypeLayout::_static_assert_layout() { - #define CHECK_TYPE_LAYOUT_OFFSET(FIELD) \ - static_assert(offsetof(ValueWitnessTable, FIELD) \ - - offsetof(ValueWitnessTable, size) \ - == offsetof(TypeLayout, FIELD), \ - "layout of " #FIELD " in TypeLayout doesn't match " \ - "value witness table") - CHECK_TYPE_LAYOUT_OFFSET(size); - CHECK_TYPE_LAYOUT_OFFSET(flags); - CHECK_TYPE_LAYOUT_OFFSET(extraInhabitantCount); - CHECK_TYPE_LAYOUT_OFFSET(stride); - - #undef CHECK_TYPE_LAYOUT_OFFSET -} template <> inline void ValueWitnessTable::publishLayout(const TypeLayout &layout) { @@ -210,12 +128,6 @@ template <> inline bool ValueWitnessTable::checkIsComplete() const { return !flags.isIncomplete(); } -template <> -inline const EnumValueWitnessTable *ValueWitnessTable::_asEVWT() const { - assert(EnumValueWitnessTable::classof(this)); - return static_cast(this); -} - // Standard value-witness tables. 
#define BUILTIN_TYPE(Symbol, _) \ From e52c3a0548ea4ce2e30bdd9bb35ea1d91e53aa16 Mon Sep 17 00:00:00 2001 From: Pavel Yaskevich Date: Tue, 22 Mar 2022 18:46:01 -0700 Subject: [PATCH 81/88] [MiscDiagnostics] Produce warnings about confusable `self` iff its explicit Restrict the warning to diagnose only explicit instances of `self` reference that do not mention the parent type via dot syntax e.g. `MyStruct.self`. Resolves: SR-15897 Resolves: SR-15691 Resolves: rdar://90624344 --- lib/Sema/MiscDiagnostics.cpp | 58 ++++++++++++++++++++------------- test/Parse/self_rebinding.swift | 5 +++ 2 files changed, 40 insertions(+), 23 deletions(-) diff --git a/lib/Sema/MiscDiagnostics.cpp b/lib/Sema/MiscDiagnostics.cpp index 03f3af7a5f3a0..037037043459a 100644 --- a/lib/Sema/MiscDiagnostics.cpp +++ b/lib/Sema/MiscDiagnostics.cpp @@ -4742,31 +4742,43 @@ static void diagUnqualifiedAccessToMethodNamedSelf(const Expr *E, if (!E || isa(E) || !E->getType()) return {false, E}; - if (auto *declRefExpr = dyn_cast(E)) { - if (declRefExpr->getDecl()->getBaseName() == Ctx.Id_self && - declRefExpr->getType()->is()) { - if (auto typeContext = DC->getInnermostTypeContext()) { - // self() is not easily confusable - if (!isa(Parent.getAsExpr())) { - auto baseType = typeContext->getDeclaredInterfaceType(); - if (!baseType->getEnumOrBoundGenericEnum()) { - auto baseTypeString = baseType.getString(); - - Ctx.Diags.diagnose(E->getLoc(), diag::self_refers_to_method, - baseTypeString); - - Ctx.Diags - .diagnose(E->getLoc(), - diag::fix_unqualified_access_member_named_self, - baseTypeString) - .fixItInsert(E->getLoc(), diag::insert_type_qualification, - baseType); - } - } - } - } + auto *DRE = dyn_cast(E); + // If this is not an explicit 'self' reference, let's keep searching. + if (!DRE || DRE->isImplicit()) + return {true, E}; + + // If this not 'self' or it's not a function reference, it's unrelated. 
+ if (!(DRE->getDecl()->getBaseName() == Ctx.Id_self && + DRE->getType()->is())) + return {true, E}; + + auto typeContext = DC->getInnermostTypeContext(); + // Use of 'self' in enums is not confusable. + if (!typeContext || typeContext->getSelfEnumDecl()) + return {true, E}; + + // self(...) is not easily confusable. + if (auto *parentExpr = Parent.getAsExpr()) { + if (isa(parentExpr)) + return {true, E}; + + // Explicit call to a static method 'self' of some type is not + // confusable. + if (isa(parentExpr) && !parentExpr->isImplicit()) + return {true, E}; } + auto baseType = typeContext->getDeclaredInterfaceType(); + auto baseTypeString = baseType.getString(); + + Ctx.Diags.diagnose(E->getLoc(), diag::self_refers_to_method, + baseTypeString); + + Ctx.Diags + .diagnose(E->getLoc(), diag::fix_unqualified_access_member_named_self, + baseTypeString) + .fixItInsert(E->getLoc(), diag::insert_type_qualification, baseType); + return {true, E}; } }; diff --git a/test/Parse/self_rebinding.swift b/test/Parse/self_rebinding.swift index 17cef308f34bb..4595f3a7a1b58 100644 --- a/test/Parse/self_rebinding.swift +++ b/test/Parse/self_rebinding.swift @@ -126,3 +126,8 @@ enum EnumCaseNamedSelf { self = EnumCaseNamedSelf.`self` // OK } } + +// rdar://90624344 - warning about `self` which cannot be fixed because it's located in implicitly generated code. +struct TestImplicitSelfUse : Codable { + let `self`: Int // Ok +} From 97cfeb1162d37c4adab146e03ee8ed80c041f98e Mon Sep 17 00:00:00 2001 From: Nate Chandler Date: Tue, 22 Mar 2022 20:38:57 -0700 Subject: [PATCH 82/88] Only define ValueStorageMap::dump in DEBUG. 
--- lib/SILOptimizer/Mandatory/AddressLowering.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/SILOptimizer/Mandatory/AddressLowering.cpp b/lib/SILOptimizer/Mandatory/AddressLowering.cpp index f82a66fce22da..a5b9c51197d8d 100644 --- a/lib/SILOptimizer/Mandatory/AddressLowering.cpp +++ b/lib/SILOptimizer/Mandatory/AddressLowering.cpp @@ -366,6 +366,7 @@ void ValueStorageMap::replaceValue(SILValue oldValue, SILValue newValue) { valueVector[ordinal].value = newValue; } +#ifndef NDEBUG void ValueStorageMap::dump() { llvm::dbgs() << "ValueStorageMap:\n"; for (unsigned ordinal : indices(valueVector)) { @@ -388,6 +389,7 @@ void ValueStorageMap::dump() { } } } +#endif //===----------------------------------------------------------------------===// // AddressLoweringState From 57d1600b99d6a9263176950535fcc6599f0eda6f Mon Sep 17 00:00:00 2001 From: Slava Pestov Date: Tue, 22 Mar 2022 23:53:09 -0400 Subject: [PATCH 83/88] RequirementMachine: Better tests for concrete type requirements with opaque archetypes Also fix a weird latent bug. In lookupConcreteNestedType(), we would push nullptr onto the concreteDecls vector if the opaque archetype did not have a nested type with this name. However, this did not turn out to be a problem, since in this code path we would only have a single element in this vector, and the later call to std::min_element() did not end up dereferencing the null pointer. However this is very dodgy, so tweak the code to prevent this from happening and add a test case (which already passed anyway). 
--- lib/AST/RequirementMachine/NameLookup.cpp | 6 ++-- ...paque_archetype_concrete_requirement.swift | 29 +-------------- ...chetype_concrete_requirement_invalid.swift | 25 +++++++++++++ ...etype_concrete_requirement_recursive.swift | 12 +++---- ...crete_requirement_recursive_rejected.swift | 35 +++++++++++++++++++ 5 files changed, 71 insertions(+), 36 deletions(-) create mode 100644 test/Generics/opaque_archetype_concrete_requirement_invalid.swift create mode 100644 test/Generics/opaque_archetype_concrete_requirement_recursive_rejected.swift diff --git a/lib/AST/RequirementMachine/NameLookup.cpp b/lib/AST/RequirementMachine/NameLookup.cpp index 0f8382e4ffd3a..ddb218353a656 100644 --- a/lib/AST/RequirementMachine/NameLookup.cpp +++ b/lib/AST/RequirementMachine/NameLookup.cpp @@ -34,8 +34,10 @@ swift::rewriting::lookupConcreteNestedType( auto *genericEnv = archetype->getGenericEnvironment(); auto genericSig = genericEnv->getGenericSignature(); - concreteDecls.push_back( - genericSig->lookupNestedType(archetype->getInterfaceType(), name)); + auto *typeDecl = + genericSig->lookupNestedType(archetype->getInterfaceType(), name); + if (typeDecl != nullptr) + concreteDecls.push_back(typeDecl); } } diff --git a/test/Generics/opaque_archetype_concrete_requirement.swift b/test/Generics/opaque_archetype_concrete_requirement.swift index c9472dce3ae62..37665794ca75f 100644 --- a/test/Generics/opaque_archetype_concrete_requirement.swift +++ b/test/Generics/opaque_archetype_concrete_requirement.swift @@ -1,4 +1,5 @@ // RUN: %target-swift-frontend -typecheck -verify %s -disable-availability-checking -debug-generic-signatures -requirement-machine-inferred-signatures=on -enable-requirement-machine-opaque-archetypes 2>&1 | %FileCheck %s +// RUN: %target-swift-frontend -emit-silgen %s -disable-availability-checking -requirement-machine-inferred-signatures=on -enable-requirement-machine-opaque-archetypes protocol P1 { associatedtype T : P2 @@ -51,31 +52,3 @@ extension HasP where T == 
DefinesOpaqueP1.T, U == G { func checkSameType1(_ t: T.T) -> DefinesOpaqueP1.T.T { return t } func checkSameType2(_ u: T.U) -> DefinesOpaqueP1.T.U { return u } } - -// FIXME: This does not work with -enable-requirement-machine-opaque-archetypes. -// See opaque_archetype_concrete_requirement_recursive.swift for a demonstration -// that it works without the flag (but more involved examples like the above -// won't work). - -protocol RecursiveP { - associatedtype T : RecursiveP -} - -struct S_RecursiveP : RecursiveP { - typealias T = S_RecursiveP -} - -struct DefinesRecursiveP : P { - var t: some RecursiveP { - return S_RecursiveP() - } -} - -protocol HasRecursiveP { - associatedtype T : RecursiveP -} - -extension HasRecursiveP where T == DefinesRecursiveP.T {} -// expected-error@-1 {{cannot build rewrite system for generic signature; rule length limit exceeded}} -// expected-note@-2 {{failed rewrite rule is τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[concrete: (((((((((@_opaqueReturnTypeOf("$s37opaque_archetype_concrete_requirement17DefinesRecursivePV1tQrvp", 0) __.T).T).T).T).T).T).T).T).T).T] => τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T]}} - diff --git a/test/Generics/opaque_archetype_concrete_requirement_invalid.swift b/test/Generics/opaque_archetype_concrete_requirement_invalid.swift new file mode 100644 index 0000000000000..ee15acf94ab23 --- /dev/null +++ b/test/Generics/opaque_archetype_concrete_requirement_invalid.swift @@ -0,0 +1,25 @@ +// RUN: %target-swift-frontend -typecheck -verify %s -disable-availability-checking -requirement-machine-inferred-signatures=on -enable-requirement-machine-opaque-archetypes + +protocol P1 {} + +struct S_P1 : P1 {} + +protocol P { + associatedtype T + + var t: T { get } 
+} + +struct DefinesOpaqueP1 : P { + var t: some P1 { + return S_P1() + } +} + +protocol HasP { + associatedtype T : P1 + associatedtype U +} + +extension HasP where T == DefinesOpaqueP1.T, U == T.DoesNotExist {} +// expected-error@-1 {{'DoesNotExist' is not a member type of type 'Self.T'}} \ No newline at end of file diff --git a/test/Generics/opaque_archetype_concrete_requirement_recursive.swift b/test/Generics/opaque_archetype_concrete_requirement_recursive.swift index d6db0d55a03a8..477b95c067470 100644 --- a/test/Generics/opaque_archetype_concrete_requirement_recursive.swift +++ b/test/Generics/opaque_archetype_concrete_requirement_recursive.swift @@ -1,4 +1,9 @@ // RUN: %target-swift-frontend -typecheck -verify %s -disable-availability-checking -debug-generic-signatures -requirement-machine-inferred-signatures=on 2>&1 | %FileCheck %s +// RUN: %target-swift-frontend -emit-silgen %s -disable-availability-checking -requirement-machine-inferred-signatures=on + +// FIXME: This does not work with -enable-requirement-machine-opaque-archetypes. +// See opaque_archetype_concrete_requirement_rejected.swift for a demonstration +// that it fails with the flag. protocol P { associatedtype T @@ -6,10 +11,6 @@ protocol P { var t: T { get } } -// FIXME: This does not work with -enable-requirement-machine-opaque-archetypes. -// See opaque_archetype_concrete_requirement.swift for a demonstration that it -// fails with the flag. 
- protocol RecursiveP { associatedtype T : RecursiveP } @@ -31,6 +32,5 @@ protocol HasRecursiveP { // CHECK-LABEL: ExtensionDecl line={{.*}} base=HasRecursiveP // CHECK-NEXT: Generic signature: extension HasRecursiveP where T == DefinesRecursiveP.T { - func checkSameType1(_ t: T) -> DefinesRecursiveP.T { return t } - func checkSameType2(_ t: T.T) -> DefinesRecursiveP.T.T { return t } + func checkSameType(_ t: T) -> DefinesRecursiveP.T { return t } } diff --git a/test/Generics/opaque_archetype_concrete_requirement_recursive_rejected.swift b/test/Generics/opaque_archetype_concrete_requirement_recursive_rejected.swift new file mode 100644 index 0000000000000..e002c24bbb4ac --- /dev/null +++ b/test/Generics/opaque_archetype_concrete_requirement_recursive_rejected.swift @@ -0,0 +1,35 @@ +// RUN: %target-typecheck-verify-swift -disable-availability-checking -requirement-machine-inferred-signatures=on -enable-requirement-machine-opaque-archetypes + +// FIXME: This does not work with -enable-requirement-machine-opaque-archetypes. +// See opaque_archetype_concrete_requirement_recursive.swift for a demonstration +// that it works without the flag (but more involved examples like the above +// won't work). 
+ +protocol P { + associatedtype T + + var t: T { get } +} + +protocol RecursiveP { + associatedtype T : RecursiveP +} + +struct S_RecursiveP : RecursiveP { + typealias T = S_RecursiveP +} + +struct DefinesRecursiveP : P { + var t: some RecursiveP { + return S_RecursiveP() + } +} + +protocol HasRecursiveP { + associatedtype T : RecursiveP +} + +extension HasRecursiveP where T == DefinesRecursiveP.T {} +// expected-error@-1 {{cannot build rewrite system for generic signature; rule length limit exceeded}} +// expected-note@-2 {{failed rewrite rule is τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[concrete: (((((((((@_opaqueReturnTypeOf("$s56opaque_archetype_concrete_requirement_recursive_rejected17DefinesRecursivePV1tQrvp", 0) __.T).T).T).T).T).T).T).T).T).T] => τ_0_0.[HasRecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T].[RecursiveP:T]}} + From 78de2fba7d06ff7008a3b5a32d55cf5d156c4153 Mon Sep 17 00:00:00 2001 From: John McCall Date: Tue, 22 Mar 2022 23:39:41 -0400 Subject: [PATCH 84/88] [NFC] Move generic contexts and metadata references into separate headers --- include/swift/ABI/GenericContext.h | 380 ++++++++++++++ include/swift/ABI/Metadata.h | 688 +------------------------- include/swift/ABI/MetadataRef.h | 376 ++++++++++++++ include/swift/ABI/TargetLayout.h | 2 +- include/swift/ABI/ValueWitnessTable.h | 2 +- 5 files changed, 765 insertions(+), 683 deletions(-) create mode 100644 include/swift/ABI/GenericContext.h create mode 100644 include/swift/ABI/MetadataRef.h diff --git a/include/swift/ABI/GenericContext.h b/include/swift/ABI/GenericContext.h new file mode 100644 index 0000000000000..2d64e1e3e415a --- /dev/null +++ b/include/swift/ABI/GenericContext.h @@ -0,0 +1,380 @@ +//===--- GenericContext.h - ABI for generic signatures 
----------*- C++ -*-===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2022 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// +// +// This file describes runtime metadata structures for representing +// generic signatures. +// +//===----------------------------------------------------------------------===// + +#ifndef SWIFT_ABI_GENERICCONTEXT_H +#define SWIFT_ABI_GENERICCONTEXT_H + +#include "swift/ABI/TargetLayout.h" +#include "swift/ABI/MetadataValues.h" +#include "swift/ABI/MetadataRef.h" +#include "swift/ABI/TrailingObjects.h" +#include "swift/Demangling/Demangle.h" + +namespace swift { +template +struct TargetProtocolConformanceDescriptor; +template +struct TargetGenericContext; + +template +struct TargetGenericContextDescriptorHeader { + /// The number of (source-written) generic parameters, and thus + /// the number of GenericParamDescriptors associated with this + /// context. The parameter descriptors appear in the order in + /// which they were given in the source. + /// + /// A GenericParamDescriptor corresponds to a type metadata pointer + /// in the arguments layout when isKeyArgument() is true. + /// isKeyArgument() will be false if the parameter has been unified + /// unified with a different parameter or an associated type. + uint16_t NumParams; + + /// The number of GenericRequirementDescriptors in this generic + /// signature. + /// + /// A GenericRequirementDescriptor of kind Protocol corresponds + /// to a witness table pointer in the arguments layout when + /// isKeyArgument() is true. isKeyArgument() will be false if + /// the protocol is an Objective-C protocol. 
(Unlike generic + /// parameters, redundant conformance requirements can simply be + /// eliminated, and so that case is not impossible.) + uint16_t NumRequirements; + + /// The size of the "key" area of the argument layout, in words. + /// Key arguments include generic parameters and conformance + /// requirements which are part of the identity of the context. + /// + /// The key area of the argument layout considers of a sequence + /// of type metadata pointers (in the same order as the parameter + /// descriptors, for those parameters which satisfy hasKeyArgument()) + /// followed by a sequence of witness table pointers (in the same + /// order as the requirements, for those requirements which satisfy + /// hasKeyArgument()). + uint16_t NumKeyArguments; + + /// In principle, the size of the "extra" area of the argument + /// layout, in words. The idea was that extra arguments would + /// include generic parameters and conformances that are not part + /// of the identity of the context; however, it's unclear why we + /// would ever want such a thing. As a result, this section is + /// unused, and this field is always zero. It can be repurposed + /// as long as it remains zero in code which must be compatible + /// with existing Swift runtimes. + uint16_t NumExtraArguments; + + uint32_t getNumArguments() const { + return NumKeyArguments + NumExtraArguments; + } + + /// Return the total size of the argument layout, in words. + /// The alignment of the argument layout is the word alignment. + uint32_t getArgumentLayoutSizeInWords() const { + return getNumArguments(); + } + + bool hasArguments() const { + return getNumArguments() > 0; + } +}; +using GenericContextDescriptorHeader = + TargetGenericContextDescriptorHeader; + +template +class TargetGenericRequirementDescriptor { +public: + GenericRequirementFlags Flags; + + /// The type that's constrained, described as a mangled name. 
+ RelativeDirectPointer Param; + + union { + /// A mangled representation of the same-type or base class the param is + /// constrained to. + /// + /// Only valid if the requirement has SameType or BaseClass kind. + RelativeDirectPointer Type; + + /// The protocol the param is constrained to. + /// + /// Only valid if the requirement has Protocol kind. + RelativeTargetProtocolDescriptorPointer Protocol; + + /// The conformance the param is constrained to use. + /// + /// Only valid if the requirement has SameConformance kind. + RelativeIndirectablePointer, + /*nullable*/ false> Conformance; + + /// The kind of layout constraint. + /// + /// Only valid if the requirement has Layout kind. + GenericRequirementLayoutKind Layout; + }; + + constexpr GenericRequirementFlags getFlags() const { + return Flags; + } + + constexpr GenericRequirementKind getKind() const { + return getFlags().getKind(); + } + + /// Retrieve the generic parameter that is the subject of this requirement, + /// as a mangled type name. + llvm::StringRef getParam() const { + return swift::Demangle::makeSymbolicMangledNameStringRef(Param.get()); + } + + /// Retrieve the protocol for a Protocol requirement. + TargetProtocolDescriptorRef getProtocol() const { + assert(getKind() == GenericRequirementKind::Protocol); + return Protocol; + } + + /// Retrieve the right-hand type for a SameType or BaseClass requirement. + llvm::StringRef getMangledTypeName() const { + assert(getKind() == GenericRequirementKind::SameType || + getKind() == GenericRequirementKind::BaseClass); + return swift::Demangle::makeSymbolicMangledNameStringRef(Type.get()); + } + + /// Retrieve the protocol conformance record for a SameConformance + /// requirement. + const TargetProtocolConformanceDescriptor *getConformance() const { + assert(getKind() == GenericRequirementKind::SameConformance); + return Conformance; + } + + /// Retrieve the layout constraint. 
+ GenericRequirementLayoutKind getLayout() const { + assert(getKind() == GenericRequirementKind::Layout); + return Layout; + } + + /// Determine whether this generic requirement has a known kind. + /// + /// \returns \c false for any future generic requirement kinds. + bool hasKnownKind() const { + switch (getKind()) { + case GenericRequirementKind::BaseClass: + case GenericRequirementKind::Layout: + case GenericRequirementKind::Protocol: + case GenericRequirementKind::SameConformance: + case GenericRequirementKind::SameType: + return true; + } + + return false; + } +}; +using GenericRequirementDescriptor = + TargetGenericRequirementDescriptor; + +template +class TargetGenericEnvironment + : public swift::ABI::TrailingObjects, + uint16_t, GenericParamDescriptor, + TargetGenericRequirementDescriptor> { + using GenericRequirementDescriptor = + TargetGenericRequirementDescriptor; + using TrailingObjects = + swift::ABI::TrailingObjects, + uint16_t, GenericParamDescriptor, GenericRequirementDescriptor>; + friend TrailingObjects; + +#if !defined(_MSC_VER) || _MSC_VER >= 1920 + template + using OverloadToken = typename TrailingObjects::template OverloadToken; +#else +// MSVC 2017 trips parsing an using of an using, of a variadic template +#define OverloadToken typename TrailingObjects::template OverloadToken +#endif + + size_t numTrailingObjects(OverloadToken) const { + return Flags.getNumGenericParameterLevels(); + } + + size_t numTrailingObjects(OverloadToken) const { + return getGenericParameterCounts().back(); + } + + size_t numTrailingObjects(OverloadToken) const { + return Flags.getNumGenericRequirements(); + } + +#if defined(_MSC_VER) && _MSC_VER < 1920 +#undef OverloadToken +#endif + + GenericEnvironmentFlags Flags; + +public: + /// Retrieve the cumulative generic parameter counts at each level of genericity. 
+ llvm::ArrayRef getGenericParameterCounts() const { + return llvm::makeArrayRef(this->template getTrailingObjects(), + Flags.getNumGenericParameterLevels()); + } + + /// Retrieve the generic parameters descriptors. + llvm::ArrayRef getGenericParameters() const { + return llvm::makeArrayRef( + this->template getTrailingObjects(), + getGenericParameterCounts().back()); + } + + /// Retrieve the generic requirements. + llvm::ArrayRef getGenericRequirements() const { + return llvm::makeArrayRef( + this->template getTrailingObjects(), + Flags.getNumGenericRequirements()); + } +}; + +using GenericEnvironmentDescriptor = TargetGenericEnvironment; + +/// CRTP class for a context descriptor that includes trailing generic +/// context description. +template class TargetGenericContextHeaderType = + TargetGenericContextDescriptorHeader, + typename... FollowingTrailingObjects> +class TrailingGenericContextObjects; + +// This oddity with partial specialization is necessary to get +// reasonable-looking code while also working around various kinds of +// compiler bad behavior with injected class names. +template class TargetSelf, + template class TargetGenericContextHeaderType, + typename... 
FollowingTrailingObjects> +class TrailingGenericContextObjects, + TargetGenericContextHeaderType, + FollowingTrailingObjects...> : + protected swift::ABI::TrailingObjects, + TargetGenericContextHeaderType, + GenericParamDescriptor, + TargetGenericRequirementDescriptor, + FollowingTrailingObjects...> +{ +protected: + using Self = TargetSelf; + using GenericContextHeaderType = TargetGenericContextHeaderType; + using GenericRequirementDescriptor = + TargetGenericRequirementDescriptor; + + using TrailingObjects = swift::ABI::TrailingObjects; + friend TrailingObjects; + +#if !defined(_MSC_VER) || _MSC_VER >= 1920 + template + using OverloadToken = typename TrailingObjects::template OverloadToken; +#else +// MSVC 2017 trips parsing an using of an using, of a variadic template +#define OverloadToken typename TrailingObjects::template OverloadToken +#endif + + const Self *asSelf() const { + return static_cast(this); + } +public: + using StoredSize = typename Runtime::StoredSize; + using StoredPointer = typename Runtime::StoredPointer; + + const GenericContextHeaderType &getFullGenericContextHeader() const { + assert(asSelf()->isGeneric()); + return *this->template getTrailingObjects(); + } + + const TargetGenericContextDescriptorHeader & + getGenericContextHeader() const { + /// HeaderType ought to be convertible to GenericContextDescriptorHeader. + return getFullGenericContextHeader(); + } + + const TargetGenericContext *getGenericContext() const { + if (!asSelf()->isGeneric()) + return nullptr; + // The generic context header should always be immediately followed in + // memory by trailing parameter and requirement descriptors. 
+ auto *header = reinterpret_cast(&getGenericContextHeader()); + return reinterpret_cast *>( + header - sizeof(TargetGenericContext)); + } + + llvm::ArrayRef getGenericParams() const { + if (!asSelf()->isGeneric()) + return {}; + + return {this->template getTrailingObjects(), + getGenericContextHeader().NumParams}; + } + + llvm::ArrayRef getGenericRequirements() const { + if (!asSelf()->isGeneric()) + return {}; + return {this->template getTrailingObjects(), + getGenericContextHeader().NumRequirements}; + } + + /// Return the amount of space that the generic arguments take up in + /// metadata of this type. + StoredSize getGenericArgumentsStorageSize() const { + return StoredSize(getGenericContextHeader().getNumArguments()) + * sizeof(StoredPointer); + } + +protected: + size_t numTrailingObjects(OverloadToken) const { + return asSelf()->isGeneric() ? 1 : 0; + } + + size_t numTrailingObjects(OverloadToken) const { + return asSelf()->isGeneric() ? getGenericContextHeader().NumParams : 0; + } + + size_t numTrailingObjects(OverloadToken) const { + return asSelf()->isGeneric() ? getGenericContextHeader().NumRequirements : 0; + } + +#if defined(_MSC_VER) && _MSC_VER < 1920 +#undef OverloadToken +#endif + +}; + +/// Description of a generic context. +template +struct TargetGenericContext final + : TrailingGenericContextObjects, + TargetGenericContextDescriptorHeader> +{ + // This struct is supposed to be empty, but TrailingObjects respects the + // unique-address-per-object C++ rule, so even if this type is empty, the + // trailing objects will come after one byte of padding. This dummy field + // takes up space to make the offset of the trailing objects portable. 
+ unsigned _dummy; + + bool isGeneric() const { return true; } +}; + +} // end namespace swift + +#endif diff --git a/include/swift/ABI/Metadata.h b/include/swift/ABI/Metadata.h index a8fff1e36fc94..0ff5b80a3810f 100644 --- a/include/swift/ABI/Metadata.h +++ b/include/swift/ABI/Metadata.h @@ -27,6 +27,8 @@ #include "swift/Strings.h" #include "swift/Runtime/Config.h" #include "swift/Runtime/Once.h" +#include "swift/ABI/GenericContext.h" +#include "swift/ABI/MetadataRef.h" #include "swift/ABI/MetadataValues.h" #include "swift/ABI/System.h" #include "swift/ABI/TargetLayout.h" @@ -39,9 +41,6 @@ #include "swift/Demangling/ManglingMacros.h" #include "swift/Basic/Unreachable.h" #include "../../../stdlib/public/SwiftShims/HeapObject.h" -#if SWIFT_OBJC_INTEROP -#include -#endif #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Casting.h" @@ -63,37 +62,11 @@ template class TargetValueTypeDescriptor; template class TargetEnumDescriptor; template class TargetStructDescriptor; template struct TargetGenericMetadataPattern; - -/// Template for branching on native pointer types versus external ones -template class Pointee> -using TargetMetadataPointer - = typename Runtime::template Pointer>; - -template class Pointee> -using ConstTargetMetadataPointer - = typename Runtime::template Pointer>; +template struct TargetProtocolConformanceDescriptor; struct HeapObject; class WeakReference; struct UnownedReference; - -template -struct TargetAnyClassMetadataTypeImpl; -template -struct TargetAnyClassMetadataTypeImpl { - using type = TargetAnyClassMetadataObjCInterop; -}; -template -struct TargetAnyClassMetadataTypeImpl { - using type = TargetAnyClassMetadata; -}; -template -using TargetAnyClassMetadataType = - typename TargetAnyClassMetadataTypeImpl::type; - -template -using TargetClassMetadataType = - TargetClassMetadata>; /// The result of requesting type metadata. Generally the return value of /// a function. 
@@ -142,8 +115,6 @@ struct MetadataDependency { } }; -template struct TargetProtocolConformanceDescriptor; - /// The header before a metadata object which appears on all type /// metadata. Note that heap metadata are not necessarily type /// metadata, even for objects of a heap type: for example, objects of @@ -572,29 +543,6 @@ struct TargetVTableDescriptorHeader { } }; -template struct TargetContextDescriptor; - -template class Context = TargetContextDescriptor> -using TargetSignedContextPointer = TargetSignedPointer * __ptrauth_swift_type_descriptor>; - -template class Context = TargetContextDescriptor> -using TargetRelativeContextPointer = - RelativeIndirectablePointer, - /*nullable*/ true, int32_t, - TargetSignedContextPointer>; - -using RelativeContextPointer = TargetRelativeContextPointer; - -template class Context = TargetContextDescriptor> -using RelativeContextPointerIntPair = - RelativeIndirectablePointerIntPair, IntTy, - /*nullable*/ true, int32_t, - TargetSignedContextPointer>; - template struct TargetMethodDescriptor; template @@ -748,7 +696,9 @@ struct TargetAnyClassMetadataObjCInterop : public TargetAnyClassMetadata { using StoredPointer = typename Runtime::StoredPointer; using StoredSize = typename Runtime::StoredSize; - using TargetClassMetadataObjCInterop = TargetClassMetadataType; + using TargetClassMetadataObjCInterop = + // swift:: qualifier works around an MSVC quirk + swift::TargetClassMetadata>; constexpr TargetAnyClassMetadataObjCInterop( TargetAnyClassMetadataObjCInterop *isa, @@ -1077,18 +1027,7 @@ struct TargetClassMetadata : public TargetAnyClassMetadataVariant { return metadata->getKind() == MetadataKind::Class; } }; -#if SWIFT_OBJC_INTEROP -using ClassMetadata = - TargetClassMetadata>; -#else -using ClassMetadata = - TargetClassMetadata>; -#endif - -template -using TargetClassMetadataObjCInterop = - TargetClassMetadata>; +using ClassMetadata = TargetClassMetadataType; /// The structure of class metadata that's compatible with dispatch 
objects. /// This includes Swift heap metadata, followed by the vtable entries that @@ -1570,161 +1509,6 @@ TargetTupleTypeMetadata::getOffsetToNumElements() -> StoredSize { template struct TargetProtocolDescriptor; -/// Layout of a small prefix of an Objective-C protocol, used only to -/// directly extract the name of the protocol. -template -struct TargetObjCProtocolPrefix { - /// Unused by the Swift runtime. - TargetPointer _ObjC_Isa; - - /// The mangled name of the protocol. - TargetPointer Name; -}; - -/// A reference to a protocol within the runtime, which may be either -/// a Swift protocol or (when Objective-C interoperability is enabled) an -/// Objective-C protocol. -/// -/// This type always contains a single target pointer, whose lowest bit is -/// used to distinguish between a Swift protocol referent and an Objective-C -/// protocol referent. -template -class TargetProtocolDescriptorRef { - using StoredPointer = typename Runtime::StoredPointer; - using ProtocolDescriptorPointer = - ConstTargetMetadataPointer; - - enum : StoredPointer { - // The bit used to indicate whether this is an Objective-C protocol. - IsObjCBit = 0x1U, - }; - - /// A direct pointer to a protocol descriptor for either an Objective-C - /// protocol (if the low bit is set) or a Swift protocol (if the low bit - /// is clear). - StoredPointer storage; - -public: - constexpr TargetProtocolDescriptorRef(StoredPointer storage) - : storage(storage) { } - - constexpr TargetProtocolDescriptorRef() : storage() { } - - TargetProtocolDescriptorRef( - ProtocolDescriptorPointer protocol, - ProtocolDispatchStrategy dispatchStrategy) { - if (Runtime::ObjCInterop) { - storage = - reinterpret_cast(protocol) | - (dispatchStrategy == ProtocolDispatchStrategy::ObjC ? 
IsObjCBit : 0); - } else { - assert(dispatchStrategy == ProtocolDispatchStrategy::Swift); - storage = reinterpret_cast(protocol); - } - } - - const static TargetProtocolDescriptorRef forSwift( - ProtocolDescriptorPointer protocol) { - return TargetProtocolDescriptorRef{ - reinterpret_cast(protocol)}; - } - -#if SWIFT_OBJC_INTEROP - constexpr static TargetProtocolDescriptorRef forObjC(Protocol *objcProtocol) { - return TargetProtocolDescriptorRef{ - reinterpret_cast(objcProtocol) | IsObjCBit}; - } -#endif - - explicit constexpr operator bool() const { - return storage != 0; - } - - /// The name of the protocol. - TargetPointer getName() const { -#if SWIFT_OBJC_INTEROP - if (isObjC()) { - return reinterpret_cast *>( - getObjCProtocol())->Name; - } -#endif - - return getSwiftProtocol()->Name; - } - - /// Determine what kind of protocol this is, Swift or Objective-C. - ProtocolDispatchStrategy getDispatchStrategy() const { - if (isObjC()) { - return ProtocolDispatchStrategy::ObjC; - } - - return ProtocolDispatchStrategy::Swift; - } - - /// Determine whether this protocol has a 'class' constraint. - ProtocolClassConstraint getClassConstraint() const { - if (isObjC()) { - return ProtocolClassConstraint::Class; - } - - return getSwiftProtocol()->getProtocolContextDescriptorFlags() - .getClassConstraint(); - } - - /// Determine whether this protocol needs a witness table. - bool needsWitnessTable() const { - if (isObjC()) { - return false; - } - - return true; - } - - SpecialProtocol getSpecialProtocol() const { - if (isObjC()) { - return SpecialProtocol::None; - } - - return getSwiftProtocol()->getProtocolContextDescriptorFlags() - .getSpecialProtocol(); - } - - /// Retrieve the Swift protocol descriptor. 
- ProtocolDescriptorPointer getSwiftProtocol() const { - assert(!isObjC()); - - // NOTE: we explicitly use a C-style cast here because cl objects to the - // reinterpret_cast from a uintptr_t type to an unsigned type which the - // Pointer type may be depending on the instantiation. Using the C-style - // cast gives us a single path irrespective of the template type parameters. - return (ProtocolDescriptorPointer)(storage & ~IsObjCBit); - } - - /// Retrieve the raw stored pointer and discriminator bit. - constexpr StoredPointer getRawData() const { - return storage; - } - - /// Whether this references an Objective-C protocol. - bool isObjC() const { - if (Runtime::ObjCInterop) - return (storage & IsObjCBit) != 0; - else - return false; - } - -#if SWIFT_OBJC_INTEROP - /// Retrieve the Objective-C protocol. - TargetPointer getObjCProtocol() const { - assert(isObjC()); - return reinterpret_cast >( - storage & ~IsObjCBit); - } -#endif -}; - -using ProtocolDescriptorRef = TargetProtocolDescriptorRef; - /// A protocol requirement descriptor. This describes a single protocol /// requirement in a protocol descriptor. The index of the requirement in /// the descriptor determines the offset of the witness in a witness table @@ -2174,116 +1958,6 @@ using ProtocolRecord = TargetProtocolRecord; template class TargetGenericRequirementDescriptor; -/// A relative pointer to a protocol descriptor, which provides the relative- -/// pointer equivalent to \c TargetProtocolDescriptorRef. -template -class RelativeTargetProtocolDescriptorPointer { - union { - /// Relative pointer to a Swift protocol descriptor. - /// The \c bool value will be false to indicate that the protocol - /// is a Swift protocol, or true to indicate that this references - /// an Objective-C protocol. - RelativeContextPointerIntPair - swiftPointer; -#if SWIFT_OBJC_INTEROP - /// Relative pointer to an ObjC protocol descriptor. 
- /// The \c bool value will be false to indicate that the protocol - /// is a Swift protocol, or true to indicate that this references - /// an Objective-C protocol. - RelativeIndirectablePointerIntPair objcPointer; -#endif - }; - - bool isObjC() const { -#if SWIFT_OBJC_INTEROP - if (Runtime::ObjCInterop) - return objcPointer.getInt(); -#endif - return false; - } - -public: - /// Retrieve a reference to the protocol. - TargetProtocolDescriptorRef getProtocol() const { -#if SWIFT_OBJC_INTEROP - if (isObjC()) { - return TargetProtocolDescriptorRef::forObjC( - const_cast(objcPointer.getPointer())); - } -#endif - - return TargetProtocolDescriptorRef::forSwift( - reinterpret_cast< - ConstTargetMetadataPointer>( - swiftPointer.getPointer())); - } - - operator TargetProtocolDescriptorRef() const { - return getProtocol(); - } -}; - -/// A reference to a type. -template -struct TargetTypeReference { - template - using TargetClassMetadata = TargetClassMetadataType; - - union { - /// A direct reference to a TypeContextDescriptor or ProtocolDescriptor. - RelativeDirectPointer> - DirectTypeDescriptor; - - /// An indirect reference to a TypeContextDescriptor or ProtocolDescriptor. - RelativeDirectPointer< - TargetSignedPointer * __ptrauth_swift_type_descriptor>> - IndirectTypeDescriptor; - - /// An indirect reference to an Objective-C class. - RelativeDirectPointer< - ConstTargetMetadataPointer> - IndirectObjCClass; - - /// A direct reference to an Objective-C class name. 
- RelativeDirectPointer - DirectObjCClassName; - }; - - const TargetContextDescriptor * - getTypeDescriptor(TypeReferenceKind kind) const { - switch (kind) { - case TypeReferenceKind::DirectTypeDescriptor: - return DirectTypeDescriptor; - - case TypeReferenceKind::IndirectTypeDescriptor: - return *IndirectTypeDescriptor; - - case TypeReferenceKind::DirectObjCClassName: - case TypeReferenceKind::IndirectObjCClass: - return nullptr; - } - - return nullptr; - } - - /// If this type reference is one of the kinds that supports ObjC - /// references, - const TargetClassMetadataObjCInterop * - getObjCClass(TypeReferenceKind kind) const; - - const TargetClassMetadataObjCInterop * const * - getIndirectObjCClass(TypeReferenceKind kind) const { - assert(kind == TypeReferenceKind::IndirectObjCClass); - return IndirectObjCClass.get(); - } - - const char *getDirectObjCClassName(TypeReferenceKind kind) const { - assert(kind == TypeReferenceKind::DirectObjCClassName); - return DirectObjCClassName.get(); - } -}; -using TypeReference = TargetTypeReference; - /// Header containing information about the resilient witnesses in a /// protocol conformance descriptor. template @@ -2496,9 +2170,6 @@ using ExternalProtocolConformanceDescriptor = TargetProtocolConformanceDescripto template