From 6b885994121d37fd27b88475d7d2e2c3f4356f7b Mon Sep 17 00:00:00 2001 From: Michael Gottesman Date: Tue, 10 Mar 2020 16:36:53 -0700 Subject: [PATCH 1/2] Revert "Merge pull request #30327 from rintaro/revert-30289" This reverts commit 0a6ccd802fe3f2d55daab0810ac330e9141b2c55, reversing changes made to 7c5d74e86bb0fdc54e1b31a571518563345e194c. --- include/swift/Basic/FrozenMultiMap.h | 12 +- include/swift/SIL/OwnershipUtils.h | 22 + .../Transforms/SemanticARCOpts.cpp | 539 ++++++++++++++++-- test/SILOptimizer/semantic-arc-opts.sil | 417 +++++++++++++- unittests/Basic/FrozenMultiMapTest.cpp | 31 + 5 files changed, 964 insertions(+), 57 deletions(-) diff --git a/include/swift/Basic/FrozenMultiMap.h b/include/swift/Basic/FrozenMultiMap.h index 8b171dd3abcfa..dd5656ffa47db 100644 --- a/include/swift/Basic/FrozenMultiMap.h +++ b/include/swift/Basic/FrozenMultiMap.h @@ -91,11 +91,17 @@ class FrozenMultiMap { frozen = true; } + /// Reset the frozen multimap in an unfrozen state with its storage cleared. + void reset() { + storage.clear(); + frozen = false; + } + unsigned size() const { return storage.size(); } bool empty() const { return storage.empty(); } struct iterator : std::iterator>> { + std::pair> { using base_iterator = typename decltype(storage)::iterator; FrozenMultiMap ↦ @@ -159,9 +165,11 @@ class FrozenMultiMap { } }; + using RangeType = llvm::iterator_range; + /// Return a range of (key, ArrayRef) pairs. The keys are guaranteed to /// be in key sorted order and the ArrayRef are in insertion order. - llvm::iterator_range getRange() const { + RangeType getRange() const { assert(isFrozen() && "Can not create range until data structure is frozen?!"); auto *self = const_cast(this); diff --git a/include/swift/SIL/OwnershipUtils.h b/include/swift/SIL/OwnershipUtils.h index 46cb8955b41fe..aacd83dc88580 100644 --- a/include/swift/SIL/OwnershipUtils.h +++ b/include/swift/SIL/OwnershipUtils.h @@ -598,6 +598,28 @@ struct OwnedValueIntroducer { return OwnedValueIntroducer(value, *kind); } + /// Returns true if this owned introducer is able to be converted into a + /// guaranteed form if none of its uses are consuming uses (looking through + /// forwarding uses). 
+ bool isConvertableToGuaranteed() const { + switch (kind) { + case OwnedValueIntroducerKind::Copy: + case OwnedValueIntroducerKind::LoadCopy: + return true; + case OwnedValueIntroducerKind::Apply: + case OwnedValueIntroducerKind::BeginApply: + case OwnedValueIntroducerKind::TryApply: + case OwnedValueIntroducerKind::LoadTake: + case OwnedValueIntroducerKind::Phi: + case OwnedValueIntroducerKind::FunctionArgument: + case OwnedValueIntroducerKind::PartialApplyInit: + case OwnedValueIntroducerKind::AllocBoxInit: + case OwnedValueIntroducerKind::AllocRefInit: + return false; + } + llvm_unreachable("Covered switch isn't covered?!"); + } + bool operator==(const OwnedValueIntroducer &other) const { return value == other.value; } diff --git a/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp b/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp index afc4b0c17d0f9..77185b5eaaca4 100644 --- a/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp +++ b/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp @@ -12,6 +12,7 @@ #define DEBUG_TYPE "sil-semantic-arc-opts" #include "swift/Basic/BlotSetVector.h" +#include "swift/Basic/FrozenMultiMap.h" #include "swift/Basic/STLExtras.h" #include "swift/SIL/BasicBlockUtils.h" #include "swift/SIL/DebugUtils.h" @@ -46,8 +47,9 @@ STATISTIC(NumLoadCopyConvertedToLoadBorrow, namespace { class LiveRange { - /// The parent value that introduces the live range. - SILValue value; + /// The value that we are computing the LiveRange for. Expected to be an owned + /// introducer and not to be forwarding. + OwnedValueIntroducer introducer; /// A list of destroy_values of the live range. SmallVector destroyingUses; @@ -56,7 +58,7 @@ class LiveRange { /// that are also able to forward guaranteed ownership. SmallVector generalForwardingUses; - /// Consuming users that we were not able to understand as a forwarding + /// Consuming uses that we were not able to understand as a forwarding /// instruction or a destroy_value. These must be passed a strongly control /// equivalent +1 value. SmallVector unknownConsumingUses; @@ -66,12 +68,20 @@ class LiveRange { LiveRange(const LiveRange &) = delete; LiveRange &operator=(const LiveRange &) = delete; + enum class HasConsumingUse_t { + No = 0, + YesButAllPhiArgs = 1, + Yes = 2, + }; + /// Return true if v only has invalidating uses that are destroy_value. Such /// an owned value is said to represent a dead "live range". /// /// Semantically this implies that a value is never passed off as +1 to memory /// or another function implying it can be used everywhere at +0. - bool hasConsumingUse() const { return unknownConsumingUses.size(); } + HasConsumingUse_t + hasConsumingUse(FrozenMultiMap + *phiToIncomingValueMultiMap = nullptr) const; ArrayRef getDestroyingUses() const { return destroyingUses; } @@ -83,10 +93,32 @@ class LiveRange { TransformRange, OperandToUser>; DestroyingInstsRange getDestroyingInsts() const; + /// If this LiveRange has a single destroying use, return that use. Otherwise, + /// return nullptr. + Operand *getSingleDestroyingUse() const { + if (destroyingUses.size() != 1) { + return nullptr; + } + return destroyingUses.front(); + } + + /// If this LiveRange has a single unknown destroying use, return that + /// use. Otherwise, return nullptr. 
+  Operand *getSingleUnknownConsumingUse() const {
+    if (unknownConsumingUses.size() != 1) {
+      return nullptr;
+    }
+    return unknownConsumingUses.front();
+  }
+
+  OwnedValueIntroducer getIntroducer() const { return introducer; }
+
   ArrayRef<Operand *> getNonConsumingForwardingUses() const {
     return generalForwardingUses;
   }
 
+  void convertOwnedGeneralForwardingUsesToGuaranteed();
+
   /// A consuming operation that:
   ///
   /// 1. If \p insertEndBorrows is true inserts end borrows at all
@@ -104,6 +136,28 @@
   void convertToGuaranteedAndRAUW(SILValue newGuaranteedValue,
                                   InstModCallbacks callbacks) &&;
 
+  /// A consuming operation that in order:
+  ///
+  /// 1. Converts the phi argument to be guaranteed.
+  ///
+  /// 2. Inserts end_borrows at the relevant destroy_values.
+  ///
+  /// 3. Deletes all destroy_values.
+  ///
+  /// 4. Converts all of the general forwarding instructions from @owned ->
+  /// @guaranteed. "Like Dominoes".
+  ///
+  /// NOTE: This leaves all of the unknown consuming users alone. It is up to
+  /// the caller to handle converting their ownership.
+  ///
+  /// NOTE: This routine leaves inserting begin_borrows for the incoming values
+  /// to the caller since those are not part of the LiveRange itself.
+  ///
+  /// NOTE: Asserts that value is a phi argument.
+  void convertArgToGuaranteed(DeadEndBlocks &deadEndBlocks,
+                              ValueLifetimeAnalysis::Frontier &scratch,
+                              InstModCallbacks callbacks) &&;
+
   /// Given a new guaranteed value, insert end_borrow for the newGuaranteedValue
   /// at all of our destroy_values in prepration for converting from owned to
   /// guaranteed.
@@ -130,13 +184,13 @@ LiveRange::DestroyingInstsRange LiveRange::getDestroyingInsts() const {
 }
 
 LiveRange::LiveRange(SILValue value)
-    : value(value), destroyingUses(), generalForwardingUses(),
-      unknownConsumingUses() {
-  assert(value.getOwnershipKind() == ValueOwnershipKind::Owned);
+    : introducer(*OwnedValueIntroducer::get(value)), destroyingUses(),
+      generalForwardingUses(), unknownConsumingUses() {
+  assert(introducer.value.getOwnershipKind() == ValueOwnershipKind::Owned);
 
   // We know that our silvalue produces an @owned value. Look through all of our
   // uses and classify them as either consuming or not.
-  SmallVector<Operand *, 32> worklist(value->getUses());
+  SmallVector<Operand *, 32> worklist(introducer.value->getUses());
   while (!worklist.empty()) {
     auto *op = worklist.pop_back_val();
 
@@ -239,10 +293,35 @@ void LiveRange::insertEndBorrowsAtDestroys(
   // we do not insert multiple end_borrow.
   //
   // TODO: Hoist this out?
-  auto *inst = value->getDefiningInstruction();
-  assert(inst && "Should only call this with value's that are actually part of "
-                 "an instruction");
-
+  SILInstruction *inst = introducer.value->getDefiningInstruction();
+  if (!inst) {
+    // If our introducer was not for an inst, it should be from an arg. In such
+    // a case, we handle one of two cases:
+    //
+    // 1. If we have one destroy and that destroy is the initial instruction in
+    // the arguments block, we just insert the end_borrow here before the
+    // destroy_value and bail. If the destroy is not the initial instruction in
+    // the arg block, we delegate to the ValueLifetimeAnalysis code.
+    //
+    // 2. If we have multiple destroys, by the properties of owned values having
+    // a linear lifetime, we know that the destroys cannot both be first in the
+    // args block since the only way that we could have two such destroys in the
+    // arg's block is if we destructured the arg. In such a case, the
+    // destructure instruction would have to be between the argument and any
+    // destroy, meaning the destroys could not be first. In such a case, we
+    // delegate to the ValueLifetimeAnalysis code.
+    auto *arg = cast<SILArgument>(introducer.value);
+    auto *beginInst = &*arg->getParent()->begin();
+    if (auto *singleDestroyingUse = getSingleDestroyingUse()) {
+      if (singleDestroyingUse->getUser() == beginInst) {
+        auto loc = RegularLocation::getAutoGeneratedLocation();
+        SILBuilderWithScope builder(beginInst);
+        builder.createEndBorrow(loc, newGuaranteedValue);
+        return;
+      }
+    }
+    inst = beginInst;
+  }
   ValueLifetimeAnalysis analysis(inst, getDestroyingInsts());
   bool foundCriticalEdges = !analysis.computeFrontier(
       scratch, ValueLifetimeAnalysis::DontModifyCFG, &deadEndBlocks);
@@ -256,22 +335,7 @@
   }
 }
 
-void LiveRange::convertToGuaranteedAndRAUW(SILValue newGuaranteedValue,
-                                           InstModCallbacks callbacks) && {
-  assert(isa<SingleValueInstruction>(value) &&
-         "Can only convert single value instruction live ranges to guaranteed");
-  while (!destroyingUses.empty()) {
-    auto *d = destroyingUses.pop_back_val();
-    callbacks.deleteInst(d->getUser());
-    ++NumEliminatedInsts;
-  }
-
-  callbacks.eraseAndRAUWSingleValueInst(cast<SingleValueInstruction>(value),
-                                        newGuaranteedValue);
-
-  // Then change all of our guaranteed forwarding insts to have guaranteed
-  // ownership kind instead of what ever they previously had (ignoring trivial
-  // results);
+void LiveRange::convertOwnedGeneralForwardingUsesToGuaranteed() {
   while (!generalForwardingUses.empty()) {
     auto *i = generalForwardingUses.pop_back_val()->getUser();
 
@@ -330,6 +394,83 @@
   }
 }
 
+void LiveRange::convertToGuaranteedAndRAUW(SILValue newGuaranteedValue,
+                                           InstModCallbacks callbacks) && {
+  auto *value = cast<SingleValueInstruction>(introducer.value);
+  while (!destroyingUses.empty()) {
+    auto *d = destroyingUses.pop_back_val();
+    callbacks.deleteInst(d->getUser());
+    ++NumEliminatedInsts;
+  }
+
+  callbacks.eraseAndRAUWSingleValueInst(value, newGuaranteedValue);
+
+  // Then change all of our guaranteed forwarding insts to have guaranteed
+  // ownership kind instead of whatever they previously had (ignoring trivial
+  // results).
+  convertOwnedGeneralForwardingUsesToGuaranteed();
+}
+
+void LiveRange::convertArgToGuaranteed(DeadEndBlocks &deadEndBlocks,
+                                       ValueLifetimeAnalysis::Frontier &scratch,
+                                       InstModCallbacks callbacks) && {
+  // First convert the phi argument to be guaranteed.
+  auto *phiArg = cast<SILPhiArgument>(introducer.value);
+  phiArg->setOwnershipKind(ValueOwnershipKind::Guaranteed);
+
+  // Then insert end_borrows at each of our destroys. We have to convert the phi
+  // to guaranteed first since otherwise, the ownership check when we create the
+  // end_borrows will trigger.
+  insertEndBorrowsAtDestroys(phiArg, deadEndBlocks, scratch);
+
+  // Then eliminate all of the destroys...
+  while (!destroyingUses.empty()) {
+    auto *d = destroyingUses.pop_back_val();
+    callbacks.deleteInst(d->getUser());
+    ++NumEliminatedInsts;
+  }
+
+  // and change all of our guaranteed forwarding insts to have guaranteed
+  // ownership kind instead of whatever they previously had (ignoring trivial
+  // results).
+  convertOwnedGeneralForwardingUsesToGuaranteed();
+}
+
+LiveRange::HasConsumingUse_t LiveRange::hasConsumingUse(
+    FrozenMultiMap<SILPhiArgument *, OwnedValueIntroducer>
+        *phiToIncomingValueMultiMap) const {
+  // First do a quick check if we have /any/ unknown consuming
+  // uses. If we do not have any, return No early.
+  if (unknownConsumingUses.empty()) {
+    return HasConsumingUse_t::No;
+  }
+
+  // Ok, we do have some unknown consuming uses. If we aren't asked to
+  // update phiToIncomingValueMultiMap, then just return Yes quickly.
+  if (!phiToIncomingValueMultiMap) {
+    return HasConsumingUse_t::Yes;
+  }
+
+  // We do not yet know how to handle cases where an owned value is used by
+  // multiple phi nodes. So we bail early if we have more than one unknown
+  // consuming use.
+  //
+  // TODO: Build up phi node web.
+  auto *op = getSingleUnknownConsumingUse();
+  if (!op) {
+    return HasConsumingUse_t::Yes;
+  }
+
+  // Make sure our single unknown consuming use is a branch inst. If not, bail;
+  // this is a /real/ unknown consuming use.
+  if (!isa<BranchInst>(op->getUser())) {
+    return HasConsumingUse_t::Yes;
+  }
+
+  // Otherwise, setup the phi to incoming value map mapping the block arguments
+  // to our introducer.
+  return HasConsumingUse_t::YesButAllPhiArgs;
+}
+
 //===----------------------------------------------------------------------===//
 //                       Address Written To Analysis
 //===----------------------------------------------------------------------===//
@@ -482,6 +623,25 @@ struct SemanticARCOptVisitor
   ValueLifetimeAnalysis::Frontier lifetimeFrontier;
   IsAddressWrittenToDefUseAnalysis isAddressWrittenToDefUseAnalysis;
 
+  /// Are we assuming that we reached a fixed point and are re-processing to
+  /// prepare to use the phiToIncomingValueMultiMap?
+  bool assumingAtFixedPoint = false;
+  FrozenMultiMap<SILPhiArgument *, OwnedValueIntroducer>
+      phiToIncomingValueMultiMap;
+
+  /// Returns the phiToIncomingValueMultiMap if we are re-processing our
+  /// worklist after the fixed point to initialize our phi to incoming value
+  /// multi-map. Otherwise returns nullptr.
+  FrozenMultiMap<SILPhiArgument *, OwnedValueIntroducer> *
+  getPhiToIncomingValueMultiMap() {
+    if (assumingAtFixedPoint)
+      return &phiToIncomingValueMultiMap;
+    return nullptr;
+  }
+
+  using FrozenMultiMapRange =
+      decltype(phiToIncomingValueMultiMap)::PairToSecondEltRange;
+
   explicit SemanticARCOptVisitor(SILFunction &F) : F(F) {}
 
   DeadEndBlocks &getDeadEndBlocks() {
@@ -513,6 +673,18 @@
     eraseInstruction(i);
   }
 
+  /// Pop values off of visitedSinceLastMutation, adding .some values to the
+  /// worklist.
+  void drainVisitedSinceLastMutationIntoWorklist() {
+    while (!visitedSinceLastMutation.empty()) {
+      Optional<SILValue> nextValue = visitedSinceLastMutation.pop_back_val();
+      if (!nextValue.hasValue()) {
+        continue;
+      }
+      worklist.insert(*nextValue);
+    }
+  }
+
   /// Remove all results of the given instruction from the worklist and then
   /// erase the instruction. Assumes that the instruction does not have any
   /// users left.
@@ -526,13 +698,7 @@
     i->eraseFromParent();
 
     // Add everything else from visitedSinceLastMutation to the worklist.
-    for (auto opt : visitedSinceLastMutation) {
-      if (!opt.hasValue()) {
-        continue;
-      }
-      worklist.insert(*opt);
-    }
-    visitedSinceLastMutation.clear();
+    drainVisitedSinceLastMutationIntoWorklist();
   }
 
   InstModCallbacks getCallbacks() {
@@ -614,10 +780,12 @@
   bool isWrittenTo(LoadInst *li, const LiveRange &lr);
 
   bool processWorklist();
+  bool optimize();
 
   bool performGuaranteedCopyValueOptimization(CopyValueInst *cvi);
   bool eliminateDeadLiveRangeCopyValue(CopyValueInst *cvi);
   bool tryJoiningCopyValueLiveRangeWithOperand(CopyValueInst *cvi);
+  bool performPostPeepholeOwnedArgElimination();
 };
 
 } // end anonymous namespace
 
@@ -626,6 +794,235 @@ static llvm::cl::opt<bool>
     VerifyAfterTransform("sil-semantic-arc-opts-verify-after-transform",
                          llvm::cl::init(false), llvm::cl::Hidden);
 
+static bool canEliminatePhi(
+    SemanticARCOptVisitor::FrozenMultiMapRange optimizableIntroducerRange,
+    ArrayRef<Operand *> incomingValueOperandList,
+    SmallVectorImpl<OwnedValueIntroducer> &ownedValueIntroducerAccumulator) {
+  // A set that we use to ensure we only add introducers to the accumulator
+  // once.
+  SmallVector<OwnedValueIntroducer, 4> scratch;
+  for (Operand *incomingValueOperand : incomingValueOperandList) {
+    SWIFT_DEFER { scratch.clear(); };
+
+    SILValue incomingValue = incomingValueOperand->get();
+
+    // Before we do anything, see if we have an incoming value with trivial
+    // ownership. This can occur in the case where we are working with enums due
+    // to trivial non-payloaded cases.
+    if (incomingValue.getOwnershipKind() == ValueOwnershipKind::None) {
+      continue;
+    }
+
+    // Now that we know it is an owned value, check for introducers of the owned
+    // value which are the copies that we may be able to eliminate.
+    //
+    // If we cannot find all of the owned value's introducers, bail.
+    if (!getAllOwnedValueIntroducers(incomingValue, scratch)) {
+      return false;
+    }
+
+    // Then make sure that all of our owned value introducers are able to be
+    // converted to guaranteed and that each was found to have a LiveRange that
+    // we could have eliminated /if/ we were to get rid of this phi.
+    if (!llvm::all_of(scratch, [&](const OwnedValueIntroducer &introducer) {
+          if (!introducer.isConvertableToGuaranteed()) {
+            return false;
+          }
+
+          // If this linear search is too slow, we can change the
+          // multimap to sort the mapped to list by pointer
+          // instead of insertion order. In such a case, we could
+          // then bisect.
+          auto iter = llvm::find(optimizableIntroducerRange, introducer);
+          return iter != optimizableIntroducerRange.end();
+        })) {
+      return false;
+    }
+
+    // Otherwise, append all introducers from scratch into our result array.
+    llvm::copy(scratch, std::back_inserter(ownedValueIntroducerAccumulator));
+  }
+
+  // Now that we are done, perform a sort unique on our result array so that on
+  // return we have a unique set of values.
+  sortUnique(ownedValueIntroducerAccumulator);
+
+  return true;
+}
+
+bool SemanticARCOptVisitor::performPostPeepholeOwnedArgElimination() {
+  bool madeChange = false;
+
+  // First freeze our multi-map so we can use it for map queries. Also, set up a
+  // defer of the reset so we do not forget to reset the map when we are done.
+  phiToIncomingValueMultiMap.setFrozen();
+  SWIFT_DEFER { phiToIncomingValueMultiMap.reset(); };
+
+  // Now for each phi argument that we have in our multi-map...
+  SmallVector<Operand *, 8> incomingValueOperandList;
+  SmallVector<OwnedValueIntroducer, 8> ownedValueIntroducers;
+  for (auto pair : phiToIncomingValueMultiMap.getRange()) {
+    SWIFT_DEFER {
+      incomingValueOperandList.clear();
+      ownedValueIntroducers.clear();
+    };
+
+    // First compute the LiveRange for our phi argument. For simplicity, we only
+    // handle cases now where our phi argument does not have any unknown
+    // consumers.
+    SILPhiArgument *phiArg = pair.first;
+    LiveRange phiArgLiveRange(phiArg);
+    if (bool(phiArgLiveRange.hasConsumingUse())) {
+      continue;
+    }
+
+    // Ok, we know that our phi argument /could/ be converted to guaranteed if
+    // our incoming values are able to be converted to guaranteed. Now for each
+    // incoming value, compute the incoming value's ownership roots and see if
+    // all of the ownership roots are in our owned incoming value array.
+    if (!phiArg->getIncomingPhiOperands(incomingValueOperandList)) {
+      continue;
+    }
+
+    // Then sort the incoming value operand list so that we can bisect search
+    // it.
+    sort(incomingValueOperandList);
+
+    // Grab our list of introducer values paired with this SILArgument. See if
+    // all of these introducer values were ones that /could/ have been
+    // eliminated if it was not for the given phi. If all of them are, we can
+    // optimize!
+    {
+      auto rawFoundOptimizableIntroducerArray = pair.second;
+      if (!canEliminatePhi(rawFoundOptimizableIntroducerArray,
+                           incomingValueOperandList, ownedValueIntroducers)) {
+        continue;
+      }
+    }
+
+    // Ok, at this point we know that we can eliminate this phi. First go
+    // through incomingValueOperandList, stash each operand's value, and set the
+    // operand's stored value to undef. We will hook them back up later.
+    SmallVector<SILValue, 8> originalIncomingValues;
+    for (Operand *incomingValueOperand : incomingValueOperandList) {
+      originalIncomingValues.push_back(incomingValueOperand->get());
+      SILType type = incomingValueOperand->get()->getType();
+      auto *undef = SILUndef::get(type, *phiArg->getFunction());
+      incomingValueOperand->set(undef);
+    }
+
+    // Then go through all of our owned value introducers, compute their live
+    // ranges, and eliminate them. We know it is safe to remove them from our
+    // previous proofs.
+    //
+    // NOTE: If our introducer is a copy_value that is one of our
+    // originalIncomingValues, we need to update the originalIncomingValue array
+    // with that value since we are going to delete the copy_value here. This
+    // creates a complication since we want to binary_search on
+    // originalIncomingValues to detect this same condition! So, we create a
+    // list of updates that we apply after we no longer need to perform
+    // binary_search, but before we start RAUWing things.
+    SmallVector<std::pair<SILValue, unsigned>, 8> incomingValueUpdates;
+    for (auto introducer : ownedValueIntroducers) {
+      SILValue v = introducer.value;
+      LiveRange lr(v);
+
+      // For now, we only handle copy_value for simplicity.
+      //
+      // TODO: Add support for load [copy].
+      if (introducer.kind == OwnedValueIntroducerKind::Copy) {
+        auto *cvi = cast<CopyValueInst>(v);
+        // Before we convert from owned to guaranteed, we need to first see if
+        // cvi is one of our originalIncomingValues. If so, we need to set
+        // originalIncomingValues to be cvi->getOperand(). Otherwise, weirdness
+        // results since we are deleting one of our stashed values.
+        auto iter = lower_bound(originalIncomingValues, cvi);
+        if (iter != originalIncomingValues.end() && *iter == cvi) {
+          // We use an auxiliary array here so we can continue to bisect on
+          // original incoming values. Once we are done processing here, we will
+          // not need that property anymore.
+          unsigned updateOffset =
+              std::distance(originalIncomingValues.begin(), iter);
+          incomingValueUpdates.emplace_back(cvi->getOperand(), updateOffset);
+        }
+        std::move(lr).convertToGuaranteedAndRAUW(cvi->getOperand(),
+                                                 getCallbacks());
+        continue;
+      }
+      llvm_unreachable("Unhandled optimizable introducer!");
+    }
+
+    // Now go through and update our original incoming value array now that we
+    // do not need it to be sorted for bisection purposes.
+    while (!incomingValueUpdates.empty()) {
+      auto pair = incomingValueUpdates.pop_back_val();
+      originalIncomingValues[pair.second] = pair.first;
+    }
+
+    // Then convert the phi's live range to be guaranteed.
+    std::move(phiArgLiveRange)
+        .convertArgToGuaranteed(getDeadEndBlocks(), lifetimeFrontier,
+                                getCallbacks());
+
+    // Now insert a begin_borrow along the incoming value edges. We have to
+    // do this after converting the incoming values to be guaranteed since
+    // SILBuilder checks simple ownership invariants (namely that def/use line
+    // up) when creating instructions.
+    assert(incomingValueOperandList.size() == originalIncomingValues.size());
+    while (!incomingValueOperandList.empty()) {
+      auto *incomingValueOperand = incomingValueOperandList.pop_back_val();
+      SILValue originalValue = originalIncomingValues.pop_back_val();
+
+      auto *br = cast<BranchInst>(incomingValueOperand->getUser());
+      if (originalValue.getOwnershipKind() != ValueOwnershipKind::None) {
+        auto loc = RegularLocation::getAutoGeneratedLocation();
+        SILBuilderWithScope builder(br);
+        originalValue = builder.createBeginBorrow(loc, originalValue);
+      }
+      incomingValueOperand->set(originalValue);
+    }
+
+    madeChange = true;
+    if (VerifyAfterTransform) {
+      phiArg->getFunction()->verify();
+    }
+  }
+
+  return madeChange;
+}
+
+bool SemanticARCOptVisitor::optimize() {
+  bool madeChange = false;
+
+  // First process the worklist until we reach a fixed point.
+  madeChange |= processWorklist();
+
+  {
+    // If we made a change, note that we assume we are at a fixed point and
+    // then re-run the worklist so that we can properly seed the ARC peephole
+    // map.
+    assumingAtFixedPoint = true;
+    SWIFT_DEFER { assumingAtFixedPoint = false; };
+
+    // Add everything in visitedSinceLastMutation to the worklist so we
+    // recompute our fixed point.
+    drainVisitedSinceLastMutationIntoWorklist();
+
+    // Then re-run the worklist. We shouldn't modify anything since we are at a
+    // fixed point and are just using this to seed the
+    // phiToIncomingValueMultiMap after we have finished changing things. If we
+    // did change something, we did something weird, so assert!
+    bool madeAdditionalChanges = processWorklist();
+    (void)madeAdditionalChanges;
+    assert(!madeAdditionalChanges && "Should be at the fixed point");
+  }
+
+  // Then use the newly seeded peephole map to try to eliminate owned phi
+  // arguments.
+  madeChange |= performPostPeepholeOwnedArgElimination();
+
+  return madeChange;
+}
+
 bool SemanticARCOptVisitor::processWorklist() {
   // NOTE: The madeChange here is not strictly necessary since we only have
   // items added to the worklist today if we have already made /some/ sort of
@@ -658,6 +1055,8 @@ bool SemanticARCOptVisitor::processWorklist() {
       // the instruction).
     if (auto *defInst = next->getDefiningInstruction()) {
       if (isInstructionTriviallyDead(defInst)) {
+        assert(!assumingAtFixedPoint &&
+               "Assumed was at fixed point and recomputing state?!");
         deleteAllDebugUses(defInst);
         eraseInstruction(defInst);
         madeChange = true;
@@ -672,6 +1071,8 @@
     // perhaps), try to visit that value recursively.
     if (auto *svi = dyn_cast<SingleValueInstruction>(next)) {
       bool madeSingleChange = visit(svi);
+      assert((!madeSingleChange || !assumingAtFixedPoint) &&
+             "Assumed was at fixed point and modified state?!");
       madeChange |= madeSingleChange;
       if (VerifyAfterTransform && madeSingleChange) {
         F.verify();
@@ -772,13 +1173,17 @@ bool SemanticARCOptVisitor::performGuaranteedCopyValueOptimization(CopyValueInst
     return false;
 
   // Then go over all of our uses and see if the value returned by our copy
-  // value forms a dead live range. If we do not have a dead live range, there
-  // must be some consuming use that we either do not understand is /actually/
-  // forwarding or a user that truly represents a necessary consume of the
-  // value (e.x. storing into memory).
+  // value forms a dead live range or a live range that would be dead if it was
+  // not consumed by phi nodes. If we do not have such a live range, there must
+  // be some consuming use that we either do not understand is /actually/
+  // forwarding or a user that truly represents a necessary consume of the value
+  // (e.g. storing into memory).
   LiveRange lr(cvi);
-  if (lr.hasConsumingUse())
+  auto hasConsumingUseState =
+      lr.hasConsumingUse(getPhiToIncomingValueMultiMap());
+  if (hasConsumingUseState == LiveRange::HasConsumingUse_t::Yes) {
     return false;
+  }
 
   // Next check if we do not have any destroys of our copy_value and are
   // processing a local borrow scope. In such a case, due to the way we ignore
@@ -879,7 +1284,52 @@
   }
 
   // Otherwise, we know that our copy_value/destroy_values are all completely
-  // within the guaranteed value scope. So RAUW and convert to guaranteed!
+  // within the guaranteed value scope. So we /could/ optimize it. Now check if
+  // we were truly dead or if we are only dead if we can eliminate phi arg uses.
+  // If we need to handle the phi arg uses, we bail. When we checked if the
+  // value was consumed, the hasConsumingUse code updated
+  // phiToIncomingValueMultiMap for us before returning its prognosis. After we
+  // reach a fixed point, we will try to eliminate this value then.
+  if (hasConsumingUseState == LiveRange::HasConsumingUse_t::YesButAllPhiArgs) {
+    auto *op = lr.getSingleUnknownConsumingUse();
+    assert(op);
+    unsigned opNum = op->getOperandNumber();
+    auto *br = cast<BranchInst>(op->getUser());
+    SmallVector<Operand *, 8> scratchSpace;
+    SmallPtrSet<SILBasicBlock *, 4> visitedBlocks;
+
+    for (auto *succBlock : br->getSuccessorBlocks()) {
+      SWIFT_DEFER {
+        scratchSpace.clear();
+        visitedBlocks.clear();
+      };
+
+      auto *arg = succBlock->getSILPhiArguments()[opNum];
+      LiveRange phiArgLR(arg);
+      if (bool(phiArgLR.hasConsumingUse())) {
+        return false;
+      }
+
+      if (llvm::any_of(borrowScopeIntroducers,
+                       [&](BorrowScopeIntroducingValue borrowScope) {
+                         return !borrowScope.areUsesWithinScope(
+                             phiArgLR.getDestroyingUses(), scratchSpace,
+                             visitedBlocks, getDeadEndBlocks());
+                       })) {
+        return false;
+      }
+    }
+
+    for (auto *succBlock : br->getSuccessorBlocks()) {
+      auto *arg = succBlock->getSILPhiArguments()[opNum];
+      phiToIncomingValueMultiMap.insert(arg, lr.getIntroducer());
+    }
+
+    return false;
+  }
+
+  // Otherwise, our copy must truly not be needed, so RAUW and convert to
+  // guaranteed!
   std::move(lr).convertToGuaranteedAndRAUW(cvi->getOperand(), getCallbacks());
   ++NumEliminatedInsts;
   return true;
@@ -1338,7 +1788,7 @@ bool SemanticARCOptVisitor::visitLoadInst(LoadInst *li) {
   // -> load_borrow if we can put a copy_value on a cold path and thus
   // eliminate RR traffic on a hot path.
   LiveRange lr(li);
-  if (lr.hasConsumingUse())
+  if (bool(lr.hasConsumingUse()))
     return false;
 
   // Then check if our address is ever written to. If it is, then we cannot use
@@ -1398,8 +1848,9 @@
   // Then process the worklist. We only destroy instructions, so invalidate
   // that. Once we modify the ownership of block arguments, we will need to
   // perhaps invalidate branches as well.
- if (visitor.processWorklist()) { - invalidateAnalysis(SILAnalysis::InvalidationKind::Instructions); + if (visitor.optimize()) { + invalidateAnalysis( + SILAnalysis::InvalidationKind::BranchesAndInstructions); } } }; diff --git a/test/SILOptimizer/semantic-arc-opts.sil b/test/SILOptimizer/semantic-arc-opts.sil index fb20187fc60cf..852b8a2d71d29 100644 --- a/test/SILOptimizer/semantic-arc-opts.sil +++ b/test/SILOptimizer/semantic-arc-opts.sil @@ -44,15 +44,15 @@ struct MyInt { var value: Builtin.Int32 } -struct AnotherStruct { - var i : Builtin.Int32 - var c : Klass +struct StructWithDataAndOwner { + var data : Builtin.Int32 + var owner : Klass } struct StructMemberTest { var c : Klass - var s : AnotherStruct - var t : (Builtin.Int32, AnotherStruct) + var s : StructWithDataAndOwner + var t : (Builtin.Int32, StructWithDataAndOwner) } class ClassLet { @@ -337,12 +337,12 @@ bb3: sil [ossa] @destructure_test : $@convention(thin) (@guaranteed StructMemberTest) -> Builtin.Int32 { bb0(%0 : @guaranteed $StructMemberTest): %2 = struct_extract %0 : $StructMemberTest, #StructMemberTest.t - %3 = copy_value %2 : $(Builtin.Int32, AnotherStruct) - (%4, %5) = destructure_tuple %3 : $(Builtin.Int32, AnotherStruct) - %6 = begin_borrow %5 : $AnotherStruct - %7 = struct_extract %6 : $AnotherStruct, #AnotherStruct.i - end_borrow %6 : $AnotherStruct - destroy_value %5 : $AnotherStruct + %3 = copy_value %2 : $(Builtin.Int32, StructWithDataAndOwner) + (%4, %5) = destructure_tuple %3 : $(Builtin.Int32, StructWithDataAndOwner) + %6 = begin_borrow %5 : $StructWithDataAndOwner + %7 = struct_extract %6 : $StructWithDataAndOwner, #StructWithDataAndOwner.data + end_borrow %6 : $StructWithDataAndOwner + destroy_value %5 : $StructWithDataAndOwner return %7 : $Builtin.Int32 } @@ -1809,3 +1809,398 @@ bb0(%0 : @guaranteed $Klass): %9999 = tuple() return %9999 : $() } + +/////////////////// +// Phi Web Tests // +/////////////////// + +// CHECK-LABEL: sil [ossa] @copy_of_guaranteed_simple_case : $@convention(thin) (@guaranteed Klass, @guaranteed Klass) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'copy_of_guaranteed_simple_case' +sil [ossa] @copy_of_guaranteed_simple_case : $@convention(thin) (@guaranteed Klass, @guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass, %1 : @guaranteed $Klass): + cond_br undef, bb1, bb2 + +bb1: + %0a = copy_value %0 : $Klass + br bb3(%0a : $Klass) + +bb2: + %1a = copy_value %1 : $Klass + br bb3(%1a : $Klass) + +bb3(%2 : @owned $Klass): + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%2) : $@convention(thin) (@guaranteed Klass) -> () + destroy_value %2 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @copy_of_guaranteed_forwarding_use : $@convention(thin) (@guaranteed Klass, @guaranteed Klass) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'copy_of_guaranteed_forwarding_use' +sil [ossa] @copy_of_guaranteed_forwarding_use : $@convention(thin) (@guaranteed Klass, @guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass, %1 : @guaranteed $Klass): + cond_br undef, bb1, bb2 + +bb1: + %0a = copy_value %0 : $Klass + %0b = unchecked_ref_cast %0a : $Klass to $Klass + br bb3(%0b : $Klass) + +bb2: + %1a = copy_value %1 : $Klass + %1b = unchecked_ref_cast %1a : $Klass to $Klass + br bb3(%1b : $Klass) + +bb3(%2 : @owned $Klass): + %f = function_ref @guaranteed_klass_user : $@convention(thin) (@guaranteed Klass) -> () + apply %f(%2) : $@convention(thin) (@guaranteed Klass) -> () + 
destroy_value %2 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// A combined test of a common pattern, casting in an optional diamond. +// +// CHECK-LABEL: sil [ossa] @optional_cast_diamond : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'optional_cast_diamond' +sil [ossa] @optional_cast_diamond : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + %1 = copy_value %0 : $FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt.1: bb2, case #FakeOptional.none!enumelt: bb1 + +bb1: + %2 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%2 : $FakeOptional) + +bb2(%3 : @owned $Klass): + %4 = unchecked_ref_cast %3 : $Klass to $Klass + %5 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %4 : $Klass + br bb3(%5 : $FakeOptional) + +bb3(%6 : @owned $FakeOptional): + %f = function_ref @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional) -> () + apply %f(%6) : $@convention(thin) (@guaranteed FakeOptional) -> () + destroy_value %6 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// A larger chained example. We can not handle this today, but we should be able +// to. +// +// CHECK-LABEL: sil [ossa] @optional_cast_diamond_chained : $@convention(thin) (@guaranteed FakeOptional) -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'optional_cast_diamond_chained' +sil [ossa] @optional_cast_diamond_chained : $@convention(thin) (@guaranteed FakeOptional) -> () { +bb0(%0 : @guaranteed $FakeOptional): + %f = function_ref @guaranteed_fakeoptional_klass_user : $@convention(thin) (@guaranteed FakeOptional) -> () + %1 = copy_value %0 : $FakeOptional + switch_enum %1 : $FakeOptional, case #FakeOptional.some!enumelt.1: bb2, case #FakeOptional.none!enumelt: bb1 + +bb1: + %2 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%2 : $FakeOptional) + +bb2(%3 : @owned $Klass): + %4 = unchecked_ref_cast %3 : $Klass to $Klass + %5 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %4 : $Klass + br bb3(%5 : $FakeOptional) + +bb3(%6 : @owned $FakeOptional): + apply %f(%6) : $@convention(thin) (@guaranteed FakeOptional) -> () + switch_enum %6 : $FakeOptional, case #FakeOptional.some!enumelt.1: bb5, case #FakeOptional.none!enumelt: bb4 + +bb4: + %2a = enum $FakeOptional, #FakeOptional.none!enumelt + br bb6(%2a : $FakeOptional) + +bb5(%3a : @owned $Klass): + %4a = unchecked_ref_cast %3a : $Klass to $Klass + %5a = enum $FakeOptional, #FakeOptional.some!enumelt.1, %4a : $Klass + br bb6(%5a : $FakeOptional) + +bb6(%6a : @owned $FakeOptional): + apply %f(%6a) : $@convention(thin) (@guaranteed FakeOptional) -> () + destroy_value %6a : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// Make sure we do not crash here. We need to be able to think about multiple +// phi node at the same time. +// +// CHECK-LABEL: sil [ossa] @multiple_phi_node_uses_of_one_copy : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK: copy_value +// CHECK: } // end sil function 'multiple_phi_node_uses_of_one_copy' +sil [ossa] @multiple_phi_node_uses_of_one_copy : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + %1 = copy_value %0 : $Klass + cond_br undef, bb1, bb2 + +bb1: + br bb3(%1 : $Klass) + +bb2: + br bb3(%1 : $Klass) + +bb3(%2 : @owned $Klass): + destroy_value %2 : $Klass + %9999 = tuple() + return %9999 : $() +} + +// Lets do a phi tree. 
+// +// CHECK-LABEL: sil [ossa] @copy_guaranteed_three_copy_simple : $@convention(thin) (@guaranteed Klass) -> () { +// CHECK-NOT: copy_value +// CHECK: } // end sil function 'copy_guaranteed_three_copy_simple' +sil [ossa] @copy_guaranteed_three_copy_simple : $@convention(thin) (@guaranteed Klass) -> () { +bb0(%0 : @guaranteed $Klass): + cond_br undef, bb1, bb2 + +bb1: + cond_br undef, bb3, bb4 + +bb2: + %1 = copy_value %0 : $Klass + br bb5(%1 : $Klass) + +bb3: + %2 = copy_value %0 : $Klass + br bb5(%2 : $Klass) + +bb4: + %3 = copy_value %0 : $Klass + br bb5(%3 : $Klass) + +bb5(%end : @owned $Klass): + destroy_value %end : $Klass + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @cast_with_optional_result_and_default_simple : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> () { +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'cast_with_optional_result_and_default_simple' +sil [ossa] @cast_with_optional_result_and_default_simple : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> () { +bb0(%0 : @guaranteed $StructWithDataAndOwner): + %1 = struct_extract %0 : $StructWithDataAndOwner, #StructWithDataAndOwner.owner + %2 = copy_value %1 : $Klass + checked_cast_br %2 : $Klass to Builtin.NativeObject, bb1, bb2 + +bb1(%3 : @owned $Builtin.NativeObject): + %4 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %3 : $Builtin.NativeObject + br bb3(%4 : $FakeOptional) + +bb2(%5 : @owned $Klass): + destroy_value %5 : $Klass + %6 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%6 : $FakeOptional) + +bb3(%7 : @owned $FakeOptional): + destroy_value %7 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// CHECK-LABEL: sil [ossa] @cast_with_optional_result_and_default_simple_unremoved_store : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> @out FakeOptional { +// CHECK-NOT: destroy_value +// CHECK: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'cast_with_optional_result_and_default_simple_unremoved_store' +sil [ossa] @cast_with_optional_result_and_default_simple_unremoved_store : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> @out FakeOptional { +bb0(%result : $*FakeOptional, %0 : @guaranteed $StructWithDataAndOwner): + %1 = struct_extract %0 : $StructWithDataAndOwner, #StructWithDataAndOwner.owner + %2 = copy_value %1 : $Klass + checked_cast_br %2 : $Klass to Builtin.NativeObject, bb1, bb2 + +bb1(%3 : @owned $Builtin.NativeObject): + %4 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %3 : $Builtin.NativeObject + br bb3(%4 : $FakeOptional) + +bb2(%5 : @owned $Klass): + destroy_value %5 : $Klass + %6 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%6 : $FakeOptional) + +bb3(%7 : @owned $FakeOptional): + %8 = copy_value %7 : $FakeOptional + store %8 to [init] %result : $*FakeOptional + destroy_value %7 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// The pass visits the blocks in order, so we know that the failure to do the +// copy_value in block 1 will occur before any copy removal in later +// blocks. Lets take advantage of that to make sure that if we fail to copy +// multiple times, we ignore the duplicate copy_value in the phi list. 
+// +// CHECK-LABEL: sil [ossa] @cast_with_optional_result_and_default_simple_unremoved_store_multiple_mods : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> @out FakeOptional { +// CHECK: copy_value +// CHECK-NOT: copy_value +// CHECK-NOT: destroy_value +// CHECK: } // end sil function 'cast_with_optional_result_and_default_simple_unremoved_store_multiple_mods' +sil [ossa] @cast_with_optional_result_and_default_simple_unremoved_store_multiple_mods : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> @out FakeOptional { +bb0(%result : $*FakeOptional, %0 : @guaranteed $StructWithDataAndOwner): + %1 = struct_extract %0 : $StructWithDataAndOwner, #StructWithDataAndOwner.owner + %2 = copy_value %1 : $Klass + checked_cast_br %2 : $Klass to Builtin.NativeObject, bb1, bb2 + +bb1(%3 : @owned $Builtin.NativeObject): + %4 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %3 : $Builtin.NativeObject + br bb3(%4 : $FakeOptional) + +bb2(%5 : @owned $Klass): + destroy_value %5 : $Klass + %6 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%6 : $FakeOptional) + +bb3(%7 : @owned $FakeOptional): + %8 = copy_value %7 : $FakeOptional + %9 = copy_value %8 : $FakeOptional + destroy_value %9 : $FakeOptional + store %8 to [init] %result : $*FakeOptional + destroy_value %7 : $FakeOptional + %9999 = tuple() + return %9999 : $() +} + +// We can not eliminate the copy_value here since we store it into the out +// parameter. +// +// CHECK-LABEL: sil [ossa] @cast_with_optional_result_and_default_and_switchenum_after : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> @out FakeOptional { +// CHECK: bb0( +// CHECK: copy_value +// CHECK: checked_cast_br +// CHECK: } // end sil function 'cast_with_optional_result_and_default_and_switchenum_after' +sil [ossa] @cast_with_optional_result_and_default_and_switchenum_after : $@convention(thin) (@guaranteed StructWithDataAndOwner) -> @out FakeOptional { +bb0(%result : $*FakeOptional, %0 : @guaranteed $StructWithDataAndOwner): + %1 = struct_extract %0 : $StructWithDataAndOwner, #StructWithDataAndOwner.owner + %2 = copy_value %1 : $Klass + checked_cast_br %2 : $Klass to Builtin.NativeObject, bb1, bb2 + +bb1(%3 : @owned $Builtin.NativeObject): + %4 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %3 : $Builtin.NativeObject + br bb3(%4 : $FakeOptional) + +bb2(%5 : @owned $Klass): + destroy_value %5 : $Klass + %6 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%6 : $FakeOptional) + +bb3(%7 : @owned $FakeOptional): + switch_enum %7 : $FakeOptional, case #FakeOptional.some!enumelt.1: bb5, case #FakeOptional.none!enumelt: bb4 + +bb4: + %8 = enum $FakeOptional, #FakeOptional.none!enumelt + store %8 to [init] %result : $*FakeOptional + br bb6 + +bb5(%9 : @owned $Builtin.NativeObject): + %10 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %9 : $Builtin.NativeObject + store %10 to [init] %result : $*FakeOptional + br bb6 + +bb6: + %9999 = tuple() + return %9999 : $() +} + +// Once we support converting struct_extract to a destructure here (since only +// one non-trivial leaf field), we should be able to optimize this case. 
+// +// CHECK-LABEL: sil [ossa] @cast_with_optional_result_and_default_and_switchenum_after_owned_arg : $@convention(thin) (@owned StructWithDataAndOwner) -> @out FakeOptional { +// CHECK: bb0( +// CHECK: copy_value +// CHECK: checked_cast_br +// CHECK: } // end sil function 'cast_with_optional_result_and_default_and_switchenum_after_owned_arg' +sil [ossa] @cast_with_optional_result_and_default_and_switchenum_after_owned_arg : $@convention(thin) (@owned StructWithDataAndOwner) -> @out FakeOptional { +bb0(%result : $*FakeOptional, %0 : @owned $StructWithDataAndOwner): + %0a = begin_borrow %0 : $StructWithDataAndOwner + %1 = struct_extract %0a : $StructWithDataAndOwner, #StructWithDataAndOwner.owner + %2 = copy_value %1 : $Klass + checked_cast_br %2 : $Klass to Builtin.NativeObject, bb1, bb2 + +bb1(%3 : @owned $Builtin.NativeObject): + %4 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %3 : $Builtin.NativeObject + br bb3(%4 : $FakeOptional) + +bb2(%5 : @owned $Klass): + destroy_value %5 : $Klass + %6 = enum $FakeOptional, #FakeOptional.none!enumelt + br bb3(%6 : $FakeOptional) + +bb3(%7 : @owned $FakeOptional): + switch_enum %7 : $FakeOptional, case #FakeOptional.some!enumelt.1: bb5, case #FakeOptional.none!enumelt: bb4 + +bb4: + %8 = enum $FakeOptional, #FakeOptional.none!enumelt + store %8 to [init] %result : $*FakeOptional + br bb6 + +bb5(%9 : @owned $Builtin.NativeObject): + %10 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %9 : $Builtin.NativeObject + store %10 to [init] %result : $*FakeOptional + br bb6 + +bb6: + end_borrow %0a : $StructWithDataAndOwner + destroy_value %0 : $StructWithDataAndOwner + %9999 = tuple() + return %9999 : $() +} + +// We can not eliminate this copy_value since the scope for %0a ends before the +// begin_borrow. 
+// CHECK-LABEL: sil [ossa] @cast_with_optional_result_and_default_and_switchenum_after_owned_arg_1 : $@convention(thin) (@owned StructWithDataAndOwner) -> @out FakeOptional {
+// CHECK: bb0(
+// CHECK: copy_value
+// CHECK: checked_cast_br
+// CHECK: } // end sil function 'cast_with_optional_result_and_default_and_switchenum_after_owned_arg_1'
+sil [ossa] @cast_with_optional_result_and_default_and_switchenum_after_owned_arg_1 : $@convention(thin) (@owned StructWithDataAndOwner) -> @out FakeOptional {
+bb0(%result : $*FakeOptional, %0 : @owned $StructWithDataAndOwner):
+  %0a = begin_borrow %0 : $StructWithDataAndOwner
+  %1 = struct_extract %0a : $StructWithDataAndOwner, #StructWithDataAndOwner.owner
+  %2 = copy_value %1 : $Klass
+  checked_cast_br %2 : $Klass to Builtin.NativeObject, bb1, bb2
+
+bb1(%3 : @owned $Builtin.NativeObject):
+  %4 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %3 : $Builtin.NativeObject
+  br bb3(%4 : $FakeOptional)
+
+bb2(%5 : @owned $Klass):
+  destroy_value %5 : $Klass
+  %6 = enum $FakeOptional, #FakeOptional.none!enumelt
+  br bb3(%6 : $FakeOptional)
+
+bb3(%7 : @owned $FakeOptional):
+  switch_enum %7 : $FakeOptional, case #FakeOptional.some!enumelt.1: bb5, case #FakeOptional.none!enumelt: bb4
+
+bb4:
+  end_borrow %0a : $StructWithDataAndOwner
+  destroy_value %0 : $StructWithDataAndOwner
+  %8 = enum $FakeOptional, #FakeOptional.none!enumelt
+  store %8 to [init] %result : $*FakeOptional
+  br bb6
+
+bb5(%9 : @owned $Builtin.NativeObject):
+  end_borrow %0a : $StructWithDataAndOwner
+  %9a = begin_borrow %9 : $Builtin.NativeObject
+  %9b = copy_value %9a : $Builtin.NativeObject
+  %10 = enum $FakeOptional, #FakeOptional.some!enumelt.1, %9b : $Builtin.NativeObject
+  store %10 to [init] %result : $*FakeOptional
+  end_borrow %9a : $Builtin.NativeObject
+  destroy_value %9 : $Builtin.NativeObject
+  destroy_value %0 : $StructWithDataAndOwner
+  br bb6
+
+bb6:
+  %9999 = tuple()
+  return %9999 : $()
+}
diff --git a/unittests/Basic/FrozenMultiMapTest.cpp b/unittests/Basic/FrozenMultiMapTest.cpp
index e5b37393c6087..4645ce4707679 100644
--- a/unittests/Basic/FrozenMultiMapTest.cpp
+++ b/unittests/Basic/FrozenMultiMapTest.cpp
@@ -90,6 +90,37 @@ TEST(FrozenMultiMapCustomTest, SimpleFind) {
   }
 }
 
+TEST(FrozenMultiMapCustomTest, TestResetWorks) {
+  Canary::resetIDs();
+  FrozenMultiMap<Canary, Canary> map;
+
+  auto key1 = Canary();
+  auto key2 = Canary();
+  map.insert(key1, Canary());
+  map.insert(key1, Canary());
+  map.insert(key1, Canary());
+  map.insert(key2, Canary());
+  map.insert(key2, Canary());
+
+  map.setFrozen();
+  map.reset();
+  map.insert(key1, Canary());
+  map.insert(key1, Canary());
+  map.insert(key1, Canary());
+  map.insert(key2, Canary());
+  map.insert(key2, Canary());
+
+  map.setFrozen();
+
+  // Just do a quick sanity test.
+  auto range = map.getRange();
+  auto begin = range.begin();
+  auto end = range.end();
+  ++begin;
+  ++begin;
+  EXPECT_EQ(std::distance(begin, end), 0);
+}
+
 TEST(FrozenMultiMapCustomTest, SimpleIter) {
   Canary::resetIDs();
   FrozenMultiMap<Canary, Canary> map;

From 7323ea95eaa98230b7a1b5e9ed247f048bfb5f1d Mon Sep 17 00:00:00 2001
From: Michael Gottesman 
Date: Tue, 10 Mar 2020 16:39:24 -0700
Subject: [PATCH 2/2] [semantic-arc-opts] Eliminate unneeded use of bisection
 that caused ASAN to fire.

I was being too clever here and in the process "foot-gunned" myself.
Specifically, I was hoping to use bisection to perform some is-contained-in-list
tests. But the list that I actually bisected was not the original sorted list
and was only derived from it!
So the pointer comparison used by the bisection no longer worked. Since the
bisection was on pointer addresses, if we were lucky and the addresses in the
new array happened to be sorted the same as in the original array, we wouldn't
hit anything.

rdar://60262326
---
 lib/SILOptimizer/Transforms/SemanticARCOpts.cpp | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp b/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp
index 77185b5eaaca4..fdf9509c9e176 100644
--- a/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp
+++ b/lib/SILOptimizer/Transforms/SemanticARCOpts.cpp
@@ -884,10 +884,6 @@ bool SemanticARCOptVisitor::performPostPeepholeOwnedArgElimination() {
       continue;
     }
 
-    // Then sort the incoming value operand list so that we can bisect search
-    // it.
-    sort(incomingValueOperandList);
-
     // Grab our list of introducer values paired with this SILArgument. See if
     // all of these introducer values were ones that /could/ have been
     // eliminated if it was not for the given phi. If all of them are, we can
@@ -936,8 +932,8 @@ bool SemanticARCOptVisitor::performPostPeepholeOwnedArgElimination() {
       // cvi is one of our originalIncomingValues. If so, we need to set
       // originalIncomingValues to be cvi->getOperand(). Otherwise, weirdness
      // results since we are deleting one of our stashed values.
-      auto iter = lower_bound(originalIncomingValues, cvi);
-      if (iter != originalIncomingValues.end() && *iter == cvi) {
+      auto iter = find(originalIncomingValues, cvi);
+      if (iter != originalIncomingValues.end()) {
        // We use an auxiliary array here so we can continue to bisect on
        // original incoming values. Once we are done processing here, we will
        // not need that property anymore.
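
For readers unfamiliar with this bug class, here is a minimal standalone C++
sketch of the failure mode fixed by PATCH 2/2. It is illustrative only and not
part of the patch: the `Value` type and the `operands`/`original` vectors are
invented stand-ins for the SIL values and the operand/value arrays used by the
pass.

// bisect_bug.cpp: why lower_bound over a merely-derived array was unsound.
#include <algorithm>
#include <cassert>
#include <vector>

struct Value {};

int main() {
  Value a, b, c, d;

  // Stand-in for incomingValueOperandList: sorted by pointer address, so
  // bisecting *this* array would be fine.
  std::vector<Value *> operands = {&a, &b, &c, &d};
  std::sort(operands.begin(), operands.end());

  // Stand-in for originalIncomingValues: built from the sorted list, but
  // holding different pointers, so its own address order is arbitrary.
  std::vector<Value *> original = {&c, &a, &d, &b};

  // std::lower_bound requires a range partitioned with respect to the probe.
  // On an unsorted range the result is meaningless, so this membership test
  // can silently give the wrong answer for &d.
  auto bad = std::lower_bound(original.begin(), original.end(), &d);
  (void)bad; // (bad != end() && *bad == &d) holds only by luck of layout.

  // The fix in the second commit: a plain linear find, which is correct
  // regardless of element order.
  auto good = std::find(original.begin(), original.end(), &d);
  assert(good != original.end() && *good == &d);
  return 0;
}

The linear `find` is O(n) per query instead of O(log n), but as the removed
comment notes, the mapped-to lists here are small, and correctness on an
arbitrarily ordered array is the property that actually matters.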