From b3c9f6b2a44f7512458b468b03ffbc2ffa5c088b Mon Sep 17 00:00:00 2001
From: Alexey Bataev
Date: Wed, 28 Feb 2024 08:36:58 -0800
Subject: [PATCH 01/17] [LV, VP] VP intrinsics support for the Loop Vectorizer
 + adding a new tail-folding mode using EVL.

This patch introduces generating VP intrinsics in the Loop Vectorizer.

Currently the Loop Vectorizer supports vector predication only in a very
limited capacity, via tail-folding and masked
load/store/gather/scatter intrinsics. However, this does not let
architectures with active-vector-length predication support take
advantage of their capabilities, and architectures with general masked
predication support can likewise only apply predication to memory
operations. By giving the Loop Vectorizer a way to generate Vector
Predication intrinsics, which (will) provide a target-independent way
to model predicated vector instructions, these architectures can make
better use of their predication capabilities.

Our first approach (implemented in this patch) builds on top of the
existing tail-folding mechanism in the LV (it just adds a new
tail-folding mode using EVL), but instead of generating masked
intrinsics for memory operations it generates VP intrinsics for
load/store instructions. The patch adds a new VPlan transform to
replace the wide header predicate compare with EVL, and updates codegen
for loads/stores to use VP load/store with EVL.

Another important part of this approach is how the Explicit Vector
Length is computed. (VP intrinsics define this vector length parameter
as the Explicit Vector Length, or EVL.) We use an experimental
intrinsic, `get_vector_length`, that can be lowered to
architecture-specific instruction(s) to compute the EVL, and add a new
recipe to emit the instructions that compute it. Using VPlan in this
way will eventually help build and compare VPlans corresponding to
different strategies and alternatives.
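To make the resulting loop shape concrete, here is a minimal sketch of
the vectorized body this mode produces for a simple a[i] = b[i] + c[i]
loop, distilled from the RISC-V tests added in this patch. Value names
are simplified, the canonical IV and exit test are elided, and
%all.true stands for the splatted all-true mask that replaces the wide
header predicate; the exact IR depends on the target and VF:

  vector.body:
    %evl.based.iv = phi i64 [ 0, %vector.ph ], [ %index.evl.next, %vector.body ]
    %remaining = sub i64 %N, %evl.based.iv
    ; EVL <= min(%remaining, vscale x 4), so the final iteration covers the tail
    %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %remaining, i32 4, i1 true)
    %gep.b = getelementptr inbounds i32, ptr %b, i64 %evl.based.iv
    %vb = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 %gep.b, <vscale x 4 x i1> %all.true, i32 %evl)
    %gep.c = getelementptr inbounds i32, ptr %c, i64 %evl.based.iv
    %vc = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 %gep.c, <vscale x 4 x i1> %all.true, i32 %evl)
    %add = add nsw <vscale x 4 x i32> %vc, %vb
    %gep.a = getelementptr inbounds i32, ptr %a, i64 %evl.based.iv
    call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> %add, ptr align 4 %gep.a, <vscale x 4 x i1> %all.true, i32 %evl)
    ; advance the EVL-based IV by the number of elements actually processed
    %evl.zext = zext i32 %evl to i64
    %index.evl.next = add i64 %evl.zext, %evl.based.iv
    ...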
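For reference, the new mode can be exercised with the flag combination
used by the tests added below:

  opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl \
      -prefer-predicate-over-epilogue=predicate-dont-vectorize \
      -mtriple=riscv64 -mattr=+v -S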
Differential Revision: https://reviews.llvm.org/D99750 --- .../llvm/Analysis/TargetTransformInfo.h | 5 +- .../Target/RISCV/RISCVTargetTransformInfo.cpp | 4 + .../Target/RISCV/RISCVTargetTransformInfo.h | 16 ++ .../Transforms/Vectorize/LoopVectorize.cpp | 167 ++++++++++++++++- llvm/lib/Transforms/Vectorize/VPlan.h | 45 +++++ .../Transforms/Vectorize/VPlanAnalysis.cpp | 16 +- .../lib/Transforms/Vectorize/VPlanRecipes.cpp | 65 +++++++ .../Transforms/Vectorize/VPlanTransforms.cpp | 119 ++++++++++-- .../Transforms/Vectorize/VPlanTransforms.h | 7 + llvm/lib/Transforms/Vectorize/VPlanValue.h | 1 + .../Transforms/Vectorize/VPlanVerifier.cpp | 51 ++++++ .../LoopVectorize/RISCV/inloop-reduction.ll | 68 ++++++- .../RISCV/vectorize-vp-intrinsics.ll | 142 +++++++++++++++ .../RISCV/vplan-vp-intrinsics.ll | 134 ++++++++++++++ .../X86/vectorize-vp-intrinsics.ll | 127 +++++++++++++ .../LoopVectorize/X86/vplan-vp-intrinsics.ll | 89 +++++++++ .../vectorize-vp-intrinsics-gather-scatter.ll | 48 +++++ .../vectorize-vp-intrinsics-interleave.ll | 170 ++++++++++++++++++ .../vectorize-vp-intrinsics-iv32.ll | 85 +++++++++ ...ectorize-vp-intrinsics-masked-loadstore.ll | 60 +++++++ .../vectorize-vp-intrinsics-no-masking.ll | 36 ++++ ...torize-vp-intrinsics-reverse-load-store.ll | 49 +++++ .../LoopVectorize/vectorize-vp-intrinsics.ll | 98 ++++++++++ .../LoopVectorize/vplan-vp-intrinsics.ll | 37 ++++ 24 files changed, 1608 insertions(+), 31 deletions(-) create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll create mode 100644 llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll create mode 100644 llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-no-masking.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll create mode 100644 llvm/test/Transforms/LoopVectorize/vplan-vp-intrinsics.ll diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h index bad0a77b0f2da..110a00ef2d70f 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -190,7 +190,10 @@ enum class TailFoldingStyle { /// Use predicate to control both data and control flow, but modify /// the trip count so that a runtime overflow check can be avoided /// and such that the scalar epilogue loop can always be removed. - DataAndControlFlowWithoutRuntimeCheck + DataAndControlFlowWithoutRuntimeCheck, + /// Use predicated EVL instructions for tail-folding. + /// Indicates that VP intrinsics should be used if tail-folding is enabled. 
+ DataWithEVL, }; struct TailFoldingInfo { diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp index 27a4d78d6df77..aeec06313c753 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp @@ -245,6 +245,10 @@ RISCVTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, return TTI::TCC_Free; } +bool RISCVTTIImpl::hasActiveVectorLength(unsigned, Type *DataTy, Align) const { + return ST->hasVInstructions(); +} + TargetTransformInfo::PopcntSupportKind RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) { assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2"); diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index ac32aea4ce2b8..5c1d6aab04f3f 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -78,6 +78,22 @@ class RISCVTTIImpl : public BasicTTIImplBase { const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind); + /// \name Vector Predication Information + /// Whether the target supports the %evl parameter of VP intrinsic efficiently + /// in hardware, for the given opcode and type/alignment. (see LLVM Language + /// Reference - "Vector Predication Intrinsics", + /// https://llvm.org/docs/LangRef.html#vector-predication-intrinsics and + /// "IR-level VP intrinsics", + /// https://llvm.org/docs/Proposals/VectorPredication.html#ir-level-vp-intrinsics). + /// \param Opcode the opcode of the instruction checked for predicated version + /// support. + /// \param DataType the type of the instruction with the \p Opcode checked for + /// prediction support. + /// \param Alignment the alignment for memory access operation checked for + /// predicated version support. 
+ bool hasActiveVectorLength(unsigned Opcode, Type *DataType, + Align Alignment) const; + TargetTransformInfo::PopcntSupportKind getPopcntSupport(unsigned TyWidth); bool shouldExpandReduction(const IntrinsicInst *II) const; diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 0834865173b2f..dc3f8a4f7d989 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -124,6 +124,7 @@ #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/IR/ValueHandle.h" +#include "llvm/IR/VectorBuilder.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CommandLine.h" @@ -248,10 +249,12 @@ static cl::opt ForceTailFoldingStyle( clEnumValN(TailFoldingStyle::DataAndControlFlow, "data-and-control", "Create lane mask using active.lane.mask intrinsic, and use " "it for both data and control flow"), - clEnumValN( - TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, - "data-and-control-without-rt-check", - "Similar to data-and-control, but remove the runtime check"))); + clEnumValN(TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck, + "data-and-control-without-rt-check", + "Similar to data-and-control, but remove the runtime check"), + clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", + "Use predicated EVL instructions for tail folding if the " + "target supports vector length predication"))); static cl::opt MaximizeBandwidth( "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, @@ -1511,7 +1514,9 @@ class LoopVectorizationCostModel { /// Selects and saves TailFoldingStyle for 2 options - if IV update may /// overflow or not. - void setTailFoldingStyles() { + /// \param IsScalableVF true if scalable vector factors enabled. + /// \param UserIC User specific interleave count. + void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) { assert(ChosenTailFoldingStyle.first == TailFoldingStyle::None && ChosenTailFoldingStyle.second == TailFoldingStyle::None && "Tail folding must not be selected yet."); @@ -1521,6 +1526,36 @@ class LoopVectorizationCostModel { if (ForceTailFoldingStyle.getNumOccurrences()) { ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = ForceTailFoldingStyle; + if (ChosenTailFoldingStyle.first == TailFoldingStyle::DataWithEVL) { + // FIXME: use actual opcode/data type for analysis here. + // FIXME: Investigate opportunity for fixed vector factor. + bool EVLIsLegal = + IsScalableVF && UserIC <= 1 && + TTI.hasActiveVectorLength(0, nullptr, Align()) && + !EnableVPlanNativePath && + // FIXME: implement support for max safe dependency distance. + Legal->isSafeForAnyVectorWidth() && + // FIXME: remove this once reductions are supported. + Legal->getReductionVars().empty() && + // FIXME: remove this once vp_reverse is supported. + none_of(WideningDecisions, + [](const std::pair, + std::pair> + &Data) { + return Data.second.first == CM_Widen_Reverse; + }); + if (!EVLIsLegal) { + ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = + TailFoldingStyle::DataWithoutLaneMask; + LLVM_DEBUG(dbgs() + << "LV: Preference for VP intrinsics indicated. Will " + "not try to generate VP Intrinsics since " + << (UserIC > 1 + ? 
"interleave count specified is greater than 1.\n" + : "the target does not support vector length " + "predication.\n")); + } + } return; } @@ -1544,6 +1579,23 @@ class LoopVectorizationCostModel { return foldTailByMasking() || Legal->blockNeedsPredication(BB); } + /// Returns true if VP intrinsics with explicit vector length support should + /// be generated in the tail folded loop. + bool useVPIWithVPEVLVectorization() const { + return PreferEVL && !EnableVPlanNativePath && + getTailFoldingStyle() == TailFoldingStyle::DataWithEVL && + // FIXME: implement support for max safe dependency distance. + Legal->isSafeForAnyVectorWidth() && + // FIXME: remove this once reductions are supported. + Legal->getReductionVars().empty() && + // FIXME: remove this once vp_reverse is supported. + none_of( + WideningDecisions, + [](const std::pair, + std::pair> + &Data) { return Data.second.first == CM_Widen_Reverse; }); + } + /// Returns true if the Phi is part of an inloop reduction. bool isInLoopReduction(PHINode *Phi) const { return InLoopReductions.contains(Phi); @@ -1691,6 +1743,10 @@ class LoopVectorizationCostModel { std::pair ChosenTailFoldingStyle = std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None); + /// Control whether to generate VP intrinsics with explicit-vector-length + /// support in vectorized code. + bool PreferEVL = false; + /// A map holding scalar costs for different vectorization factors. The /// presence of a cost for an instruction in the mapping indicates that the /// instruction will be scalarized when vectorizing with the associated @@ -4647,9 +4703,22 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { // found modulo the vectorization factor is not zero, try to fold the tail // by masking. // FIXME: look for a smaller MaxVF that does divide TC rather than masking. - setTailFoldingStyles(); - if (foldTailByMasking()) + setTailFoldingStyles(MaxFactors.ScalableVF.isScalable(), UserIC); + if (foldTailByMasking()) { + if (getTailFoldingStyle() == TailFoldingStyle::DataWithEVL) { + LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. Will " + "try to generate VP Intrinsics with scalable vector " + "factors only.\n"); + // Tail folded loop using VP intrinsics restricts the VF to be scalable + // for now. + // TODO: extend it for fixed vectors, if required. + assert(MaxFactors.ScalableVF.isScalable() && + "Expected scalable vector factor."); + + MaxFactors.FixedVF = ElementCount::getFixed(1); + } return MaxFactors; + } // If there was a tail-folding hint/switch, but we can't fold the tail by // masking, fallback to a vectorization with a scalar epilogue. @@ -5257,6 +5326,10 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, if (!isScalarEpilogueAllowed()) return 1; + // Do not interleave if EVL is preferred and no User IC is specified. + if (useVPIWithVPEVLVectorization()) + return 1; + // We used the distance for the interleave count. 
if (!Legal->isSafeForAnyVectorWidth()) return 1; @@ -8487,6 +8560,8 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, VPlanTransforms::truncateToMinimalBitwidths( *Plan, CM.getMinimalBitwidths(), PSE.getSE()->getContext()); VPlanTransforms::optimize(*Plan, *PSE.getSE()); + if (CM.useVPIWithVPEVLVectorization()) + VPlanTransforms::addExplicitVectorLength(*Plan); assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid"); VPlans.push_back(std::move(Plan)); } @@ -9307,6 +9382,52 @@ void VPReplicateRecipe::execute(VPTransformState &State) { State.ILV->scalarizeInstruction(UI, this, VPIteration(Part, Lane), State); } +/// Creates either vp_store or vp_scatter intrinsics calls to represent +/// predicated store/scatter. +static Instruction * +lowerStoreUsingVectorIntrinsics(IRBuilderBase &Builder, Value *Addr, + Value *StoredVal, bool IsScatter, Value *Mask, + Value *EVLPart, const Align &Alignment) { + CallInst *Call; + if (IsScatter) { + Call = Builder.CreateIntrinsic(Type::getVoidTy(EVLPart->getContext()), + Intrinsic::vp_scatter, + {StoredVal, Addr, Mask, EVLPart}); + } else { + VectorBuilder VBuilder(Builder); + VBuilder.setEVL(EVLPart).setMask(Mask); + Call = cast(VBuilder.createVectorInstruction( + Instruction::Store, Type::getVoidTy(EVLPart->getContext()), + {StoredVal, Addr})); + } + Call->addParamAttr( + 1, Attribute::getWithAlignment(Call->getContext(), Alignment)); + return Call; +} + +/// Creates either vp_load or vp_gather intrinsics calls to represent +/// predicated load/gather. +static Instruction *lowerLoadUsingVectorIntrinsics(IRBuilderBase &Builder, + VectorType *DataTy, + Value *Addr, bool IsGather, + Value *Mask, Value *EVLPart, + const Align &Alignment) { + CallInst *Call; + if (IsGather) { + Call = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, + {Addr, Mask, EVLPart}, nullptr, + "wide.masked.gather"); + } else { + VectorBuilder VBuilder(Builder); + VBuilder.setEVL(EVLPart).setMask(Mask); + Call = cast(VBuilder.createVectorInstruction( + Instruction::Load, DataTy, Addr, "vp.op.load")); + } + Call->addParamAttr( + 0, Attribute::getWithAlignment(Call->getContext(), Alignment)); + return Call; +} + void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { VPValue *StoredValue = isStore() ? getStoredValue() : nullptr; @@ -9345,7 +9466,21 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { for (unsigned Part = 0; Part < State.UF; ++Part) { Instruction *NewSI = nullptr; Value *StoredVal = State.get(StoredValue, Part); - if (CreateGatherScatter) { + if (State.EVL) { + assert(State.UF == 1 && + "Expected only UF==1 for predicated vectorization."); + Value *EVLPart = State.get(State.EVL, Part); + // If EVL is not nullptr, then EVL must be a valid value set during plan + // creation, possibly default value = whole vector register length. EVL + // is created only if TTI prefers predicated vectorization, thus if EVL + // is not nullptr it also implies preference for predicated + // vectorization. + // FIXME: Support reverse store after vp_reverse is added. + Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; + NewSI = lowerStoreUsingVectorIntrinsics( + Builder, State.get(getAddr(), Part), StoredVal, CreateGatherScatter, + MaskPart, EVLPart, Alignment); + } else if (CreateGatherScatter) { Value *MaskPart = isMaskRequired ? 
BlockInMaskParts[Part] : nullptr; Value *VectorGep = State.get(getAddr(), Part); NewSI = Builder.CreateMaskedScatter(StoredVal, VectorGep, Alignment, @@ -9375,7 +9510,21 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { State.setDebugLocFrom(getDebugLoc()); for (unsigned Part = 0; Part < State.UF; ++Part) { Value *NewLI; - if (CreateGatherScatter) { + if (State.EVL) { + assert(State.UF == 1 && + "Expected only UF==1 for predicated vectorization."); + Value *EVLPart = State.get(State.EVL, Part); + // If EVL is not nullptr, then EVL must be a valid value set during plan + // creation, possibly default value = whole vector register length. EVL + // is created only if TTI prefers predicated vectorization, thus if EVL + // is not nullptr it also implies preference for predicated + // vectorization. + // FIXME: Support reverse loading after vp_reverse is added. + Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; + NewLI = lowerLoadUsingVectorIntrinsics( + Builder, DataTy, State.get(getAddr(), Part), CreateGatherScatter, + MaskPart, EVLPart, Alignment); + } else if (CreateGatherScatter) { Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; Value *VectorGep = State.get(getAddr(), Part); NewLI = Builder.CreateMaskedGather(DataTy, VectorGep, Alignment, MaskPart, diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 813ebda29ffd9..434f4e220c478 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -242,6 +242,12 @@ struct VPTransformState { ElementCount VF; unsigned UF; + /// If EVL is not nullptr, then EVL must be a valid value set during plan + /// transformation, possibly a default value = whole vector register length. + /// EVL is created only if TTI prefers predicated vectorization, thus if EVL + /// is not nullptr it also implies preference for predicated vectorization. + VPValue *EVL = nullptr; + /// Hold the indices to generate specific scalar instructions. Null indicates /// that all instances are to be generated, using either scalar or vector /// instructions. @@ -1159,6 +1165,8 @@ class VPInstruction : public VPRecipeWithIRFlags { SLPLoad, SLPStore, ActiveLaneMask, + ExplicitVectorLength, + ExplicitVectorLengthIVIncrement, CalculateTripCountMinusVF, // Increment the canonical IV separately for each unrolled part. CanonicalIVIncrementForPart, @@ -2489,6 +2497,43 @@ class VPActiveLaneMaskPHIRecipe : public VPHeaderPHIRecipe { #endif }; +/// A recipe for generating the phi node for the current index of elements, +/// adjusted in accordance with EVL value. It starts at StartIV value and gets +/// incremented by EVL in each iteration of the vector loop. +class VPEVLBasedIVPHIRecipe : public VPHeaderPHIRecipe { +public: + VPEVLBasedIVPHIRecipe(VPValue *StartMask, DebugLoc DL) + : VPHeaderPHIRecipe(VPDef::VPEVLBasedIVPHISC, nullptr, StartMask, DL) {} + + ~VPEVLBasedIVPHIRecipe() override = default; + + VPEVLBasedIVPHIRecipe *clone() override { + return new VPEVLBasedIVPHIRecipe(getOperand(0), getDebugLoc()); + } + + VP_CLASSOF_IMPL(VPDef::VPEVLBasedIVPHISC) + + static inline bool classof(const VPHeaderPHIRecipe *D) { + return D->getVPDefID() == VPDef::VPEVLBasedIVPHISC; + } + + /// Generate phi for handling IV based on EVL over iterations correctly. + void execute(VPTransformState &State) override; + + /// Returns true if the recipe only uses the first lane of operand \p Op. 
+ bool onlyFirstLaneUsed(const VPValue *Op) const override { + assert(is_contained(operands(), Op) && + "Op must be an operand of the recipe"); + return true; + } + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + /// Print the recipe. + void print(raw_ostream &O, const Twine &Indent, + VPSlotTracker &SlotTracker) const override; +#endif +}; + /// A Recipe for widening the canonical induction variable of the vector loop. class VPWidenCanonicalIVRecipe : public VPSingleDefRecipe { public: diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 04e30312dc23a..72bf1d402cf97 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -216,14 +216,14 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) { Type *ResultTy = TypeSwitch(V->getDefiningRecipe()) .Case( - [this](const auto *R) { - // Handle header phi recipes, except VPWienIntOrFpInduction - // which needs special handling due it being possibly truncated. - // TODO: consider inferring/caching type of siblings, e.g., - // backedge value, here and in cases below. - return inferScalarType(R->getStartValue()); - }) + VPReductionPHIRecipe, VPWidenPointerInductionRecipe, + VPEVLBasedIVPHIRecipe>([this](const auto *R) { + // Handle header phi recipes, except VPWienIntOrFpInduction + // which needs special handling due it being possibly truncated. + // TODO: consider inferring/caching type of siblings, e.g., + // backedge value, here and in cases below. + return inferScalarType(R->getStartValue()); + }) .Case( [](const auto *R) { return R->getScalarType(); }) .CasegetType(), 0); return Builder.CreateSelect(Cmp, Sub, Zero); } + case VPInstruction::ExplicitVectorLength: { + // Compute EVL + auto GetSetVL = [=](VPTransformState &State, Value *EVL) { + assert(EVL->getType()->isIntegerTy() && + "Requested vector length should be an integer."); + + // TODO: Add support for MaxSafeDist for correct loop emission. + assert(State.VF.isScalable() && "Expected scalable vector factor."); + Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue()); + + Value *GVL = State.Builder.CreateIntrinsic( + State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length, + {EVL, VFArg, State.Builder.getTrue()}); + return GVL; + }; + // TODO: Restructure this code with an explicit remainder loop, vsetvli can + // be outside of the main loop. + assert(Part == 0 && "No unrolling expected for predicated vectorization."); + // Compute VTC - IV as the EVL(requested vector length). + Value *Index = State.get(getOperand(0), 0); + Value *TripCount = State.get(getOperand(1), VPIteration(0, 0)); + Value *EVL = State.Builder.CreateSub(TripCount, Index); + Value *SetVL = GetSetVL(State, EVL); + assert(!State.EVL && "multiple EVL recipes"); + State.EVL = this; + return SetVL; + } + // TODO: remove this once a regular Add VPInstruction is supported. 
+ case VPInstruction::ExplicitVectorLengthIVIncrement: { + assert(Part == 0 && "Expected unroll factor 1 for VP vectorization."); + Value *Phi = State.get(getOperand(0), VPIteration(Part, 0)); + Value *EVL = State.get(getOperand(1), VPIteration(Part, 0)); + assert(EVL->getType() == Phi->getType() && + "EVL and Phi must have the same type."); + return Builder.CreateAdd(Phi, EVL, Name, hasNoUnsignedWrap(), + hasNoSignedWrap()); + } case VPInstruction::CanonicalIVIncrementForPart: { auto *IV = State.get(getOperand(0), VPIteration(0, 0)); if (Part == 0) @@ -628,6 +665,12 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent, case VPInstruction::ActiveLaneMask: O << "active lane mask"; break; + case VPInstruction::ExplicitVectorLength: + O << "EXPLICIT-VECTOR-LENGTH"; + break; + case VPInstruction::ExplicitVectorLengthIVIncrement: + O << "EXPLICIT-VECTOR-LENGTH +"; + break; case VPInstruction::FirstOrderRecurrenceSplice: O << "first-order splice"; break; @@ -1974,3 +2017,25 @@ void VPActiveLaneMaskPHIRecipe::print(raw_ostream &O, const Twine &Indent, printOperands(O, SlotTracker); } #endif + +void VPEVLBasedIVPHIRecipe::execute(VPTransformState &State) { + BasicBlock *VectorPH = State.CFG.getPreheaderBBFor(this); + assert(State.UF == 1 && "Expected unroll factor 1 for VP vectorization."); + Value *Start = State.get(getOperand(0), VPIteration(0, 0)); + PHINode *EntryPart = + State.Builder.CreatePHI(Start->getType(), 2, "evl.based.iv"); + EntryPart->addIncoming(Start, VectorPH); + EntryPart->setDebugLoc(getDebugLoc()); + State.set(this, EntryPart, 0); +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +void VPEVLBasedIVPHIRecipe::print(raw_ostream &O, const Twine &Indent, + VPSlotTracker &SlotTracker) const { + O << Indent << "EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI "; + + printAsOperand(O, SlotTracker); + O << " = phi "; + printOperands(O, SlotTracker); +} +#endif diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 7d4e54d099455..8ba8019834017 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1209,6 +1209,45 @@ static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch( return LaneMaskPhi; } +/// Replaces (ICMP_ULE, WideCanonicalIV, backedge-taken-count) pattern using +/// the given idiom \p Idiom. +static void replaceHeaderPredicateWithIdiom( + VPlan &Plan, VPValue &Idiom, + function_ref Cond = {}) { + auto *FoundWidenCanonicalIVUser = + find_if(Plan.getCanonicalIV()->users(), + [](VPUser *U) { return isa(U); }); + if (FoundWidenCanonicalIVUser == Plan.getCanonicalIV()->users().end()) + return; + auto *WideCanonicalIV = + cast(*FoundWidenCanonicalIVUser); + // Walk users of WideCanonicalIV and replace all compares of the form + // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with + // the given idiom VPValue. 
+ VPValue *BTC = Plan.getOrCreateBackedgeTakenCount(); + for (VPUser *U : SmallVector(WideCanonicalIV->users())) { + auto *CompareToReplace = dyn_cast(U); + if (!CompareToReplace || + CompareToReplace->getOpcode() != Instruction::ICmp || + CompareToReplace->getPredicate() != CmpInst::ICMP_ULE || + CompareToReplace->getOperand(1) != BTC) + continue; + + assert(CompareToReplace->getOperand(0) == WideCanonicalIV && + "WidenCanonicalIV must be the first operand of the compare"); + if (Cond) { + CompareToReplace->replaceUsesWithIf(&Idiom, Cond); + if (!CompareToReplace->getNumUsers()) + CompareToReplace->eraseFromParent(); + } else { + CompareToReplace->replaceAllUsesWith(&Idiom); + CompareToReplace->eraseFromParent(); + } + } + if (!WideCanonicalIV->getNumUsers()) + WideCanonicalIV->eraseFromParent(); +} + void VPlanTransforms::addActiveLaneMask( VPlan &Plan, bool UseActiveLaneMaskForControlFlow, bool DataAndControlFlowWithoutRuntimeCheck) { @@ -1238,20 +1277,76 @@ void VPlanTransforms::addActiveLaneMask( // Walk users of WideCanonicalIV and replace all compares of the form // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an // active-lane-mask. - VPValue *BTC = Plan.getOrCreateBackedgeTakenCount(); - for (VPUser *U : SmallVector(WideCanonicalIV->users())) { - auto *CompareToReplace = dyn_cast(U); - if (!CompareToReplace || - CompareToReplace->getOpcode() != Instruction::ICmp || - CompareToReplace->getPredicate() != CmpInst::ICMP_ULE || - CompareToReplace->getOperand(1) != BTC) - continue; + replaceHeaderPredicateWithIdiom(Plan, *LaneMask); +} - assert(CompareToReplace->getOperand(0) == WideCanonicalIV && - "WidenCanonicalIV must be the first operand of the compare"); - CompareToReplace->replaceAllUsesWith(LaneMask); - CompareToReplace->eraseFromParent(); +// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and +// replaces all uses except the canonical IV increment of VPCanonicalIVPHIRecipe +// with a VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe is used only +// for loop iterations counting after this transformation. +// +// The function uses the following definitions: +// %StartV is the canonical induction start value. +// +// The function adds the following recipes: +// +// vector.ph: +// ... +// +// vector.body: +// ... +// %P = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ], [ %NextEVL, +// %vector.body ] +// %EVL = EXPLICIT-VECTOR-LENGTH %P, original TC +// ... +// %NextEVL = EXPLICIT-VECTOR-LENGTH + %P, %EVL +// ... +// +void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { + VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock(); + auto *CanonicalIVPHI = Plan.getCanonicalIV(); + VPValue *StartV = CanonicalIVPHI->getStartValue(); + + // Walk users of WideCanonicalIV and replace all compares of the form + // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an + // all-true-mask. + Value *TrueMask = + ConstantInt::getTrue(CanonicalIVPHI->getScalarType()->getContext()); + VPValue *VPTrueMask = Plan.getVPValueOrAddLiveIn(TrueMask); + replaceHeaderPredicateWithIdiom(Plan, *VPTrueMask, [](VPUser &U, unsigned) { + return isa(U); + }); + // Now create the ExplicitVectorLengthPhi recipe in the main loop. 
+ auto *EVLPhi = new VPEVLBasedIVPHIRecipe(StartV, DebugLoc()); + EVLPhi->insertAfter(CanonicalIVPHI); + auto *VPEVL = new VPInstruction(VPInstruction::ExplicitVectorLength, + {EVLPhi, Plan.getTripCount()}); + VPEVL->insertBefore(*Header, Header->getFirstNonPhi()); + + auto *CanonicalIVIncrement = + cast(CanonicalIVPHI->getBackedgeValue()); + VPSingleDefRecipe *OpVPEVL = VPEVL; + if (CanonicalIVPHI->getScalarType() != + IntegerType::get(CanonicalIVPHI->getScalarType()->getContext(), + /*NumBits=*/32)) { + OpVPEVL = new VPScalarCastRecipe(Instruction::ZExt, OpVPEVL, + CanonicalIVPHI->getScalarType()); + OpVPEVL->insertBefore(CanonicalIVIncrement); } + auto *NextEVLIV = new VPInstruction( + VPInstruction::ExplicitVectorLengthIVIncrement, {OpVPEVL, EVLPhi}, + {CanonicalIVIncrement->hasNoUnsignedWrap(), + CanonicalIVIncrement->hasNoSignedWrap()}, + CanonicalIVIncrement->getDebugLoc(), "index.evl.next"); + NextEVLIV->insertBefore(CanonicalIVIncrement); + EVLPhi->addOperand(NextEVLIV); + + // Replace all uses of VPCanonicalIVPHIRecipe by + // VPEVLBasedIVPHIRecipe except for VPInstruction::CanonicalIVIncrement. + CanonicalIVPHI->replaceAllUsesWith(EVLPhi); + CanonicalIVIncrement->setOperand(0, CanonicalIVPHI); + // TODO: support unroll factor > 1. + Plan.setUF(1); } void VPlanTransforms::dropPoisonGeneratingRecipes( diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index ff83c3f083b09..0cbc70713d9c1 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -98,6 +98,13 @@ struct VPlanTransforms { /// VPlan directly. static void dropPoisonGeneratingRecipes( VPlan &Plan, function_ref BlockNeedsPredication); + + /// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and + /// replaces all uses except the canonical IV increment of + /// VPCanonicalIVPHIRecipe with a VPEVLBasedIVPHIRecipe. + /// VPCanonicalIVPHIRecipe is only used to control the loop after + /// this transformation. + static void addExplicitVectorLength(VPlan &Plan); }; } // namespace llvm diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h index 1d2c17e91b7ab..8b221d30e5254 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanValue.h +++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h @@ -368,6 +368,7 @@ class VPDef { // VPHeaderPHIRecipe need to be kept together. VPCanonicalIVPHISC, VPActiveLaneMaskPHISC, + VPEVLBasedIVPHISC, VPFirstOrderRecurrencePHISC, VPWidenIntOrFpInductionSC, VPWidenPointerInductionSC, diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 7ebdb914fb852..364e9cda4fa04 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -92,7 +92,58 @@ static bool verifyVPBasicBlock(const VPBasicBlock *VPBB, for (const VPRecipeBase &R : *VPBB) RecipeNumbering[&R] = Cnt++; + // Check if EVL recipes exist only in Entry block and only once. 
+ DenseSet EVLFound; + const VPBlockBase *Header = nullptr; + const VPBlockBase *Exit = nullptr; + const VPlan *Plan = VPBB->getPlan(); + if (Plan && Plan->getEntry()->getNumSuccessors() == 1) { + Header = Plan->getVectorLoopRegion()->getEntry(); + Exit = Plan->getVectorLoopRegion()->getExiting(); + } + auto CheckEVLRecipiesInsts = [&](const VPRecipeBase *R) { + if (isa(R)) { + if (!Header || VPBB != Header) { + errs() << "EVL PHI recipe not in entry block!\n"; + return false; + } + if (EVLFound.contains(VPDef::VPEVLBasedIVPHISC)) { + errs() << "EVL PHI recipe inserted more than once!\n"; + return false; + } + EVLFound.insert(VPDef::VPEVLBasedIVPHISC); + return true; + } + auto *RInst = dyn_cast(R); + if (!RInst) + return true; + switch (RInst->getOpcode()) { + case VPInstruction::ExplicitVectorLength: + if (!Header || VPBB != Header) { + errs() << "EVL instruction not in entry block!\n"; + return false; + } + break; + case VPInstruction::ExplicitVectorLengthIVIncrement: + if (!Exit || VPBB != Exit) { + errs() << "EVL inc instruction not in exit block!\n"; + return false; + } + break; + default: + return true; + } + if (EVLFound.contains(RInst->getOpcode() + VPDef::VPLastPHISC)) { + errs() << "EVL instruction inserted more than once!\n"; + return false; + } + EVLFound.insert(RInst->getOpcode() + VPDef::VPLastPHISC); + return true; + }; + for (const VPRecipeBase &R : *VPBB) { + if (!CheckEVLRecipiesInsts(&R)) + return false; for (const VPValue *V : R.definedValues()) { for (const VPUser *U : V->users()) { auto *UI = dyn_cast(U); diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll index 57e1dc9051f4d..dc2298ed21dfc 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll @@ -1,11 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize < %s -S -o - | FileCheck %s -check-prefix=OUTLOOP ; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize -prefer-inloop-reductions < %s -S -o - | FileCheck %s -check-prefix=INLOOP - +; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" target triple = "riscv64" +; FIXME: inloop reductions are not supported yet with predicated vectorization. 
+ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; OUTLOOP-LABEL: @add_i16_i32( ; OUTLOOP-NEXT: entry: @@ -115,6 +117,70 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; INLOOP-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] ; INLOOP-NEXT: ret i32 [[R_0_LCSSA]] ; +; IF-EVL-LABEL: @add_i16_i32( +; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[N:%.*]], 0 +; IF-EVL-NEXT: br i1 [[CMP6]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]] +; IF-EVL: for.body.preheader: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP1:%.*]] = mul i32 [[TMP0]], 4 +; IF-EVL-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], 4 +; IF-EVL-NEXT: [[TMP4:%.*]] = sub i32 [[TMP3]], 1 +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], [[TMP4]] +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], 4 +; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[INDEX]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; IF-EVL-NEXT: [[TMP8:%.*]] = call @llvm.experimental.stepvector.nxv4i32() +; IF-EVL-NEXT: [[TMP9:%.*]] = add zeroinitializer, [[TMP8]] +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add [[BROADCAST_SPLAT2]], [[TMP9]] +; IF-EVL-NEXT: [[TMP10:%.*]] = icmp ule [[VEC_IV]], [[BROADCAST_SPLAT]] +; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[TMP7]] +; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[TMP11]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i16.p0(ptr [[TMP12]], i32 2, [[TMP10]], poison) +; IF-EVL-NEXT: [[TMP13:%.*]] = sext [[WIDE_MASKED_LOAD]] to +; IF-EVL-NEXT: [[TMP14]] = add [[VEC_PHI]], [[TMP13]] +; IF-EVL-NEXT: [[TMP15:%.*]] = select [[TMP10]], [[TMP14]], [[VEC_PHI]] +; IF-EVL-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], [[TMP6]] +; IF-EVL-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32( [[TMP15]]) +; IF-EVL-NEXT: br i1 true, label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] +; IF-EVL-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[FOR_BODY_PREHEADER]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] 
+; IF-EVL: for.body: +; IF-EVL-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; IF-EVL-NEXT: [[R_07:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, ptr [[X]], i32 [[I_08]] +; IF-EVL-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2 +; IF-EVL-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32 +; IF-EVL-NEXT: [[ADD]] = add nsw i32 [[R_07]], [[CONV]] +; IF-EVL-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1 +; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[INC]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; IF-EVL: for.cond.cleanup.loopexit: +; IF-EVL-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP17]], [[MIDDLE_BLOCK]] ] +; IF-EVL-NEXT: br label [[FOR_COND_CLEANUP]] +; IF-EVL: for.cond.cleanup: +; IF-EVL-NEXT: [[R_0_LCSSA:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[ADD_LCSSA]], [[FOR_COND_CLEANUP_LOOPEXIT]] ] +; IF-EVL-NEXT: ret i32 [[R_0_LCSSA]] +; entry: %cmp6 = icmp sgt i32 %n, 0 br i1 %cmp6, label %for.body, label %for.cond.cleanup diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll new file mode 100644 index 0000000000000..60d61f25e9f0c --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll @@ -0,0 +1,142 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s + +define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +; IF-EVL-LABEL: @foo( +; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N:%.*]] +; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4 +; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4 +; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]] +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4 +; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true) +; IF-EVL-NEXT: 
[[TMP13:%.*]] = add i64 [[EVL_BASED_IV]], 0 +; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP13]] +; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i32 0 +; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer), i32 [[TMP12]]) +; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP13]] +; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 0 +; IF-EVL-NEXT: [[VP_OP_LOAD1:%.*]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP17]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer), i32 [[TMP12]]) +; IF-EVL-NEXT: [[TMP18:%.*]] = add nsw [[VP_OP_LOAD1]], [[VP_OP_LOAD]] +; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP13]] +; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i32 0 +; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[TMP18]], ptr align 4 [[TMP20]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer), i32 [[TMP12]]) +; IF-EVL-NEXT: [[TMP21:%.*]] = zext i32 [[TMP12]] to i64 +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP21]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]] +; IF-EVL-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; IF-EVL-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] +; IF-EVL-NEXT: [[TMP24:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP24]], [[TMP23]] +; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; IF-EVL: for.cond.cleanup: +; IF-EVL-NEXT: ret void +; +; NO-VP-LABEL: @foo( +; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDEX:%.*]] = 
phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP6]] +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0 +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP8]], align 4 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP6]] +; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0 +; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load , ptr [[TMP10]], align 4 +; NO-VP-NEXT: [[TMP11:%.*]] = add nsw [[WIDE_LOAD1]], [[WIDE_LOAD]] +; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP6]] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 0 +; NO-VP-NEXT: store [[TMP11]], ptr [[TMP13]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: for.body: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; NO-VP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] +; NO-VP-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP15]] +; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-VP: for.cond.cleanup: +; NO-VP-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll new file mode 100644 index 0000000000000..9f03d945acead --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll @@ -0,0 +1,134 @@ +; REQUIRES: asserts + +; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 
-disable-output < %s 2>&1 | FileCheck --check-prefixes=IF-EVL,CHECK %s + +; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP,CHECK %s + +define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +; IF-EVL: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF={1}' { +; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF +; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count +; IF-EVL-NEXT: Live-in ir<%N> = original trip-count +; IF-EVL-EMPTY: +; IF-EVL: vector.ph: +; IF-EVL-NEXT: Successor(s): vector loop +; IF-EVL-EMPTY: +; IF-EVL-NEXT: vector loop: { +; IF-EVL-NEXT: vector.body: +; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION +; IF-EVL-NEXT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI vp<[[EVL_PHI:%[0-9]+]]> = phi ir<0>, vp<[[IV_NEXT:%[0-9]+]]> +; IF-EVL-NEXT: EMIT vp<[[EVL:%.+]]> = EXPLICIT-VECTOR-LENGTH vp<[[EVL_PHI]]>, ir<%N> +; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[EVL_PHI]]>, ir<1> +; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> +; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> +; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = load vp<[[PTR1]]>, ir +; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> +; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> +; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = load vp<[[PTR2]]>, ir +; IF-EVL-NEXT: WIDEN ir<[[ADD:%.+]]> = add nsw ir<[[LD2]]>, ir<[[LD1]]> +; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> +; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> +; IF-EVL-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]>, ir +; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 +; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = EXPLICIT-VECTOR-LENGTH + vp<[[CAST]]>, vp<[[EVL_PHI]]> +; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%[0-9]+]]> = add vp<[[IV]]>, vp<[[VFUF]]> +; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> +; IF-EVL-NEXT: No successors +; IF-EVL-NEXT: } + +; NO-VP: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2,vscale x 4},UF>=1' { +; NO-VP-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF +; NO-VP-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count +; NO-VP-NEXT: Live-in ir<%N> = original trip-count +; NO-VP-EMPTY: +; NO-VP: vector.ph: +; NO-VP-NEXT: Successor(s): vector loop +; NO-VP-EMPTY: +; NO-VP-NEXT: vector loop: { +; NO-VP-NEXT: vector.body: +; NO-VP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION +; NO-VP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[IV]]>, ir<1> +; NO-VP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> +; NO-VP-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> +; NO-VP-NEXT: WIDEN ir<[[LD1:%.+]]> = load vp<[[PTR1]]> +; NO-VP-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> +; NO-VP-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> +; NO-VP-NEXT: WIDEN ir<[[LD2:%.+]]> = load vp<[[PTR2]]> +; NO-VP-NEXT: WIDEN ir<[[ADD:%.+]]> = add nsw ir<[[LD2]]>, ir<[[LD1]]> +; NO-VP-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> +; NO-VP-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> +; NO-VP-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]> +; NO-VP-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add nuw 
vp<[[IV]]>, vp<[[VFUF]]> +; NO-VP-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]> +; NO-VP-NEXT: No successors +; NO-VP-NEXT: } + +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} + +define void @safe_dep(ptr %p) { +; CHECK: VPlan 'Initial VPlan for VF={vscale x 1,vscale x 2},UF>=1' { +; CHECK-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF +; CHECK-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count +; CHECK-NEXT: Live-in ir<512> = original trip-count +; CHECK-EMPTY: +; CHECK: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION +; CHECK-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[IV]]>, ir<1> +; CHECK-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr ir<%p>, vp<[[ST]]> +; CHECK-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> +; CHECK-NEXT: WIDEN ir<[[V:%.+]]> = load vp<[[PTR1]]> +; CHECK-NEXT: CLONE ir<[[OFFSET:.+]]> = add vp<[[ST]]>, ir<100> +; CHECK-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr ir<%p>, ir<[[OFFSET]]> +; CHECK-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> +; CHECK-NEXT: WIDEN store vp<[[PTR2]]>, ir<[[V]]> +; CHECK-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]> +; CHECK-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]> +; CHECK-NEXT: No successors +; CHECK-NEXT: } + +entry: + br label %loop + +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + %a1 = getelementptr i64, ptr %p, i64 %iv + %v = load i64, ptr %a1, align 32 + %offset = add i64 %iv, 100 + %a2 = getelementptr i64, ptr %p, i64 %offset + store i64 %v, ptr %a2, align 32 + %iv.next = add i64 %iv, 1 + %cmp = icmp ne i64 %iv, 511 + br i1 %cmp, label %loop, label %exit + +exit: + ret void +} + diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll new file mode 100644 index 0000000000000..cc9711f869d5e --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll @@ -0,0 +1,127 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=x86_64 -mattr=+avx512f -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=x86_64 -mattr=+avx512f -S < %s 2>&1 | FileCheck --check-prefix=NO-VP %s + +define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +; IF-EVL-LABEL: @foo( +; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 15 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 +; IF-EVL-NEXT: 
[[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer +; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i64> poison, i64 [[INDEX]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT1]], <16 x i64> poison, <16 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <16 x i64> [[BROADCAST_SPLAT2]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <16 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]] +; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr [[TMP3]], i32 4, <16 x i1> [[TMP1]], <16 x i32> poison) +; IF-EVL-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP0]] +; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr [[TMP5]], i32 4, <16 x i1> [[TMP1]], <16 x i32> poison) +; IF-EVL-NEXT: [[TMP6:%.*]] = add nsw <16 x i32> [[WIDE_MASKED_LOAD3]], [[WIDE_MASKED_LOAD]] +; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0 +; IF-EVL-NEXT: call void @llvm.masked.store.v16i32.p0(<16 x i32> [[TMP6]], ptr [[TMP8]], i32 4, <16 x i1> [[TMP1]]) +; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16 +; IF-EVL-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; IF-EVL-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] +; IF-EVL-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]] +; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; IF-EVL: for.cond.cleanup: +; IF-EVL-NEXT: ret void +; +; NO-VP-LABEL: @foo( +; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 
[[N:%.*]], 16 +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16 +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP2]], align 4 +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 +; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4 +; NO-VP-NEXT: [[TMP5:%.*]] = add nsw <16 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]] +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0 +; NO-VP-NEXT: store <16 x i32> [[TMP5]], ptr [[TMP7]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; NO-VP-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: for.body: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] +; NO-VP-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; NO-VP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] +; NO-VP-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] +; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-VP: for.cond.cleanup: +; NO-VP-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll new file mode 100644 index 0000000000000..395339faaa60d --- /dev/null 
+++ b/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll @@ -0,0 +1,89 @@ +; REQUIRES: asserts + +; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize -force-vector-width=4 \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=x86_64 -mattr=+avx512f -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s + +; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize -force-vector-width=4 \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=x86_64 -mattr=+avx512f -disable-output < %s 2>&1 | FileCheck --check-prefix=NO-VP %s + +define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +; IF-EVL: VPlan 'Initial VPlan for VF={4},UF>=1' { +; IF-EVL-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF +; IF-EVL-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count +; IF-EVL-NEXT: Live-in vp<[[BETC:%[0-9]+]]> = backedge-taken count +; IF-EVL-NEXT: Live-in ir<%N> = original trip-count +; IF-EVL-EMPTY: +; IF-EVL: vector.ph: +; IF-EVL-NEXT: Successor(s): vector loop +; IF-EVL-EMPTY: +; IF-EVL-NEXT: vector loop: { +; IF-EVL-NEXT: vector.body: +; IF-EVL-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION +; IF-EVL-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[IV]]>, ir<1> +; IF-EVL-NEXT: EMIT vp<[[VIV:%[0-9]+]]> = WIDEN-CANONICAL-INDUCTION vp<[[IV]]> +; IF-EVL-NEXT: EMIT vp<[[MASK:%[0-9]+]]> = icmp ule vp<[[VIV]]>, vp<[[BETC]]> +; IF-EVL-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> +; IF-EVL-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> +; IF-EVL-NEXT: WIDEN ir<[[LD1:%.+]]> = load vp<[[PTR1]]>, vp<[[MASK]]> +; IF-EVL-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> +; IF-EVL-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> +; IF-EVL-NEXT: WIDEN ir<[[LD2:%.+]]> = load vp<[[PTR2]]>, vp<[[MASK]]> +; IF-EVL-NEXT: WIDEN ir<[[ADD:%.+]]> = add nsw ir<[[LD2]]>, ir<[[LD1]]> +; IF-EVL-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> +; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> +; IF-EVL-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]>, vp<[[MASK]]> +; IF-EVL-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add vp<[[IV]]>, vp<[[VFUF]]> +; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]> +; IF-EVL-NEXT: No successors +; IF-EVL-NEXT: } + +; NO-VP: VPlan 'Initial VPlan for VF={4},UF>=1' { +; NO-VP-NEXT: Live-in vp<[[VFUF:%[0-9]+]]> = VF * UF +; NO-VP-NEXT: Live-in vp<[[VTC:%[0-9]+]]> = vector-trip-count +; NO-VP-NEXT: Live-in ir<%N> = original trip-count +; NO-VP-EMPTY: +; NO-VP: vector.ph: +; NO-VP-NEXT: Successor(s): vector loop +; NO-VP-EMPTY: +; NO-VP-NEXT: vector loop: { +; NO-VP-NEXT: vector.body: +; NO-VP-NEXT: EMIT vp<[[IV:%[0-9]+]]> = CANONICAL-INDUCTION +; NO-VP-NEXT: vp<[[ST:%[0-9]+]]> = SCALAR-STEPS vp<[[IV]]>, ir<1> +; NO-VP-NEXT: CLONE ir<[[GEP1:%.+]]> = getelementptr inbounds ir<%b>, vp<[[ST]]> +; NO-VP-NEXT: vp<[[PTR1:%[0-9]+]]> = vector-pointer ir<[[GEP1]]> +; NO-VP-NEXT: WIDEN ir<[[LD1:%.+]]> = load vp<[[PTR1]]> +; NO-VP-NEXT: CLONE ir<[[GEP2:%.+]]> = getelementptr inbounds ir<%c>, vp<[[ST]]> +; NO-VP-NEXT: vp<[[PTR2:%[0-9]+]]> = vector-pointer ir<[[GEP2]]> +; NO-VP-NEXT: WIDEN ir<[[LD2:%.+]]> = load vp<[[PTR2]]> +; NO-VP-NEXT: WIDEN ir<[[ADD:%.+]]> = add nsw ir<[[LD2]]>, ir<[[LD1]]> +; NO-VP-NEXT: CLONE ir<[[GEP3:%.+]]> = getelementptr inbounds ir<%a>, vp<[[ST]]> +; NO-VP-NEXT: 
vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> +; NO-VP-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]> +; NO-VP-NEXT: EMIT vp<[[IV_NEXT:%[0-9]+]]> = add nuw vp<[[IV]]>, vp<[[VFUF]]> +; NO-VP-NEXT: EMIT branch-on-count vp<[[IV_NEXT]]>, vp<[[VTC]]> +; NO-VP-NEXT: No successors +; NO-VP-NEXT: } + +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll new file mode 100644 index 0000000000000..98d2938523ce4 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll @@ -0,0 +1,48 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; The target does not support predicated vectorization. 
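+; For reference, a hedged sketch of the EVL-folded loop body one would expect
+; on a target that does support predicated vectorization. This is illustrative
+; only (it is not emitted for this target), and %alltrue stands for an
+; all-true <vscale x 1 x i1> mask:
+;
+;   %avl = sub i64 %n, %iv
+;   %evl = call i32 @llvm.experimental.get_vector_length.i64(i64 %avl, i32 1, i1 true)
+;   %gep = getelementptr inbounds i32, ptr %index, i64 %iv
+;   %idxs = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0(ptr %gep, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   %in.ptrs = getelementptr inbounds float, ptr %in, <vscale x 1 x i64> %idxs
+;   %vals = call <vscale x 1 x float> @llvm.vp.gather.nxv1f32.nxv1p0(<vscale x 1 x ptr> %in.ptrs, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   %out.ptrs = getelementptr inbounds float, ptr %out, <vscale x 1 x i64> %idxs
+;   call void @llvm.vp.scatter.nxv1f32.nxv1p0(<vscale x 1 x float> %vals, <vscale x 1 x ptr> %out.ptrs, <vscale x 1 x i1> %alltrue, i32 %evl)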
+define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) { +; CHECK-LABEL: @gather_scatter( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV]] +; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]] +; CHECK-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4 +; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; CHECK: for.end: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] + %arrayidx3 = getelementptr inbounds i32, ptr %index, i64 %indvars.iv + %0 = load i64, ptr %arrayidx3, align 8 + %arrayidx5 = getelementptr inbounds float, ptr %in, i64 %0 + %1 = load float, ptr %arrayidx5, align 4 + %arrayidx7 = getelementptr inbounds float, ptr %out, i64 %0 + store float %1, ptr %arrayidx7, align 4 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %exitcond.not = icmp eq i64 %indvars.iv.next, %n + br i1 %exitcond.not, label %for.end, label %for.body + +for.end: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll new file mode 100644 index 0000000000000..9d0ae16c265da --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll @@ -0,0 +1,170 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=IF-EVL %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s + +; FIXME: interleaved accesses are not supported yet with predicated vectorization. 
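+; Note also that EVL tail folding pins the interleave count to 1. A hedged
+; sketch (illustrative values only) of why UF > 1 does not compose naturally
+; with EVL: a second part would have to recompute the remaining element count
+; from the EVL already consumed by the first part:
+;
+;   %evl0 = call i32 @llvm.experimental.get_vector_length.i64(i64 %avl, i32 1, i1 true)
+;   %evl0.zext = zext i32 %evl0 to i64
+;   %avl1 = sub i64 %avl, %evl0.zext
+;   %evl1 = call i32 @llvm.experimental.get_vector_length.i64(i64 %avl1, i32 1, i1 true)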
+define void @interleave(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +; IF-EVL-LABEL: @interleave( +; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 1 +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ] +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add i64 [[INDEX]], 0 +; IF-EVL-NEXT: [[VEC_IV1:%.*]] = add i64 [[INDEX]], 1 +; IF-EVL-NEXT: [[TMP0:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] +; IF-EVL-NEXT: br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; IF-EVL: pred.store.if: +; IF-EVL-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 +; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP2]] +; IF-EVL-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4 +; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP2]] +; IF-EVL-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 +; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP2]] +; IF-EVL-NEXT: [[TMP8:%.*]] = add nsw i32 [[TMP6]], [[TMP4]] +; IF-EVL-NEXT: store i32 [[TMP8]], ptr [[TMP7]], align 4 +; IF-EVL-NEXT: br label [[PRED_STORE_CONTINUE]] +; IF-EVL: pred.store.continue: +; IF-EVL-NEXT: [[TMP9:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP4]], [[PRED_STORE_IF]] ] +; IF-EVL-NEXT: [[TMP10:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP6]], [[PRED_STORE_IF]] ] +; IF-EVL-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]] +; IF-EVL: pred.store.if2: +; IF-EVL-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1 +; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]] +; IF-EVL-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 +; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP11]] +; IF-EVL-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 +; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]] +; IF-EVL-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP15]], [[TMP13]] +; IF-EVL-NEXT: store i32 [[TMP17]], ptr [[TMP16]], align 4 +; IF-EVL-NEXT: br label [[PRED_STORE_CONTINUE3]] +; IF-EVL: pred.store.continue3: +; IF-EVL-NEXT: [[TMP18:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP13]], [[PRED_STORE_IF2]] ] +; IF-EVL-NEXT: [[TMP19:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP15]], [[PRED_STORE_IF2]] ] +; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 +; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] 
= getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; IF-EVL-NEXT:    [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+; IF-EVL-NEXT:    [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; IF-EVL-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP21]]
+; IF-EVL-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; IF-EVL-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
+; IF-EVL-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; IF-EVL-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; IF-EVL-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; IF-EVL:       for.cond.cleanup:
+; IF-EVL-NEXT:    ret void
+;
+; NO-VP-LABEL: @interleave(
+; NO-VP-NEXT:  entry:
+; NO-VP-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[TMP1:%.*]] = mul i64 [[TMP0]], 2
+; NO-VP-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
+; NO-VP-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP:       vector.ph:
+; NO-VP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 2
+; NO-VP-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 2
+; NO-VP-NEXT:    br label [[VECTOR_BODY:%.*]]
+; NO-VP:       vector.body:
+; NO-VP-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT:    [[TMP6:%.*]] = add i64 [[INDEX]], 0
+; NO-VP-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[TMP8:%.*]] = add i64 [[TMP7]], 0
+; NO-VP-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 1
+; NO-VP-NEXT:    [[TMP10:%.*]] = add i64 [[INDEX]], [[TMP9]]
+; NO-VP-NEXT:    [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP6]]
+; NO-VP-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP10]]
+; NO-VP-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0
+; NO-VP-NEXT:    [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i64 [[TMP14]]
+; NO-VP-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP13]], align 4
+; NO-VP-NEXT:    [[WIDE_LOAD1:%.*]] = load <vscale x 1 x i32>, ptr [[TMP15]], align 4
+; NO-VP-NEXT:    [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP6]]
+; NO-VP-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP10]]
+; NO-VP-NEXT:    [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 0
+; NO-VP-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i64 [[TMP19]]
+; NO-VP-NEXT:    [[WIDE_LOAD2:%.*]] = load <vscale x 1 x i32>, ptr [[TMP18]], align 4
+; NO-VP-NEXT:    [[WIDE_LOAD3:%.*]] = load <vscale x 1 x i32>, ptr [[TMP20]], align 4
+; NO-VP-NEXT:    [[TMP21:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD]]
+; NO-VP-NEXT:    [[TMP22:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD3]], [[WIDE_LOAD1]]
+; NO-VP-NEXT:    [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP6]]
+; NO-VP-NEXT:    [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP10]]
+; NO-VP-NEXT:    [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i32 0
+; NO-VP-NEXT:    [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i64 [[TMP26]]
+; NO-VP-NEXT:    store <vscale x 1 x i32> [[TMP21]], ptr [[TMP25]], align 4
+; NO-VP-NEXT:    store <vscale x 1 x i32> [[TMP22]], ptr [[TMP27]], align 4
+; NO-VP-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT:    [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT:    br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP:       middle.block:
+; NO-VP-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; NO-VP:       scalar.ph:
+; NO-VP-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT:    br label [[FOR_BODY:%.*]]
+; NO-VP:       for.body:
+; NO-VP-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT:    [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+; NO-VP-NEXT:    [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; NO-VP-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP29]]
+; NO-VP-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
+; NO-VP-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; NO-VP-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; NO-VP:       for.cond.cleanup:
+; NO-VP-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv
+  %0 = load i32, ptr %arrayidx, align 4
+  %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv
+  %1 = load i32, ptr %arrayidx2, align 4
+  %add = add nsw i32 %1, %0
+  %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv
+  store i32 %add, ptr %arrayidx4, align 4
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, %N
+  br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0
+
+for.cond.cleanup:
+  ret void
+}
+
+!0 = distinct !{!0, !1, !2}
+!1 = !{!"llvm.loop.interleave.count", i32 2}
+!2 = !{!"llvm.loop.vectorize.enable", i1 true}
diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll
new file mode 100644
index 0000000000000..5df0229ded283
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll
@@ -0,0 +1,85 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=loop-vectorize \
+; RUN: -force-tail-folding-style=data-with-evl \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=IF-EVL %s
+
+; RUN: opt -passes=loop-vectorize \
+; RUN: -force-tail-folding-style=none \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s
+
+; The target does not support predicated vectorization.
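+; A hedged sketch of the EVL-folded body for this i32 induction variable
+; (illustrative only; @llvm.experimental.get_vector_length is overloaded on
+; the counter type, so the AVL can stay i32, and %alltrue stands for an
+; all-true <vscale x 1 x i1> mask):
+;
+;   %avl = sub i32 %N, %iv
+;   %evl = call i32 @llvm.experimental.get_vector_length.i32(i32 %avl, i32 1, i1 true)
+;   %v = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr %b.gep, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   call void @llvm.vp.store.nxv1i32.p0(<vscale x 1 x i32> %v, ptr %a.gep, <vscale x 1 x i1> %alltrue, i32 %evl)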
+define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
+; IF-EVL-LABEL: @iv32(
+; IF-EVL-NEXT:  entry:
+; IF-EVL-NEXT:    br label [[FOR_BODY:%.*]]
+; IF-EVL:       for.body:
+; IF-EVL-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[IV]]
+; IF-EVL-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[IV]]
+; IF-EVL-NEXT:    store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4
+; IF-EVL-NEXT:    [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; IF-EVL-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N:%.*]]
+; IF-EVL-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; IF-EVL:       for.cond.cleanup:
+; IF-EVL-NEXT:    ret void
+;
+; NO-VP-LABEL: @iv32(
+; NO-VP-NEXT:  entry:
+; NO-VP-NEXT:    [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; NO-VP-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N:%.*]], [[TMP0]]
+; NO-VP-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP:       vector.ph:
+; NO-VP-NEXT:    [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; NO-VP-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP1]]
+; NO-VP-NEXT:    [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
+; NO-VP-NEXT:    br label [[VECTOR_BODY:%.*]]
+; NO-VP:       vector.body:
+; NO-VP-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT:    [[TMP3:%.*]] = add i32 [[INDEX]], 0
+; NO-VP-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[TMP3]]
+; NO-VP-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
+; NO-VP-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4
+; NO-VP-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[TMP3]]
+; NO-VP-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
+; NO-VP-NEXT:    store <vscale x 1 x i32> [[WIDE_LOAD]], ptr [[TMP7]], align 4
+; NO-VP-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
+; NO-VP-NEXT:    [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT:    br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP:       middle.block:
+; NO-VP-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; NO-VP-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; NO-VP:       scalar.ph:
+; NO-VP-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT:    br label [[FOR_BODY:%.*]]
+; NO-VP:       for.body:
+; NO-VP-NEXT:    [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV]]
+; NO-VP-NEXT:    [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV]]
+; NO-VP-NEXT:    store i32 [[TMP9]], ptr [[ARRAYIDX4]], align 4
+; NO-VP-NEXT:    [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; NO-VP-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
+; NO-VP-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; NO-VP:       for.cond.cleanup:
+; NO-VP-NEXT:    ret void
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32, ptr %b, i32 %iv
+  %0 = load i32, ptr %arrayidx, align 4
+  %arrayidx4 = 
getelementptr inbounds i32, ptr %a, i32 %iv + store i32 %0, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i32 %iv, 1 + %exitcond.not = icmp eq i32 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll new file mode 100644 index 0000000000000..efa45a3374681 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll @@ -0,0 +1,60 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; The target does not support predicated vectorization. +define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { +; CHECK-LABEL: @masked_loadstore( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[I_011]] +; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP0]], 0 +; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] +; CHECK: if.then: +; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_011]] +; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]] +; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4 +; CHECK-NEXT: br label [[FOR_INC]] +; CHECK: for.inc: +; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1 +; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[FOR_BODY]] +; CHECK: exit: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %i.011 = phi i64 [ %inc, %for.inc ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %i.011 + %0 = load i32, ptr %arrayidx, align 4 + %cmp1 = icmp ne i32 %0, 0 + br i1 %cmp1, label %if.then, label %for.inc + +if.then: + %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %i.011 + %1 = load i32, ptr %arrayidx3, align 4 + %add = add i32 %0, %1 + store i32 %add, ptr %arrayidx3, align 4 + br label %for.inc + +for.inc: + %inc = add nuw nsw i64 %i.011, 1 + %exitcond.not = icmp eq i64 %inc, %n + br i1 %exitcond.not, label %exit, label %for.body + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-no-masking.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-no-masking.ll new file mode 100644 index 0000000000000..ed736602565e8 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-no-masking.ll @@ -0,0 +1,36 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: 
-prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; No need to emit predicated vector code if the vector instructions with masking are not required. +define i32 @no_masking() { +; CHECK-LABEL: @no_masking( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[BODY:%.*]] +; CHECK: body: +; CHECK-NEXT: [[P:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[BODY]] ] +; CHECK-NEXT: [[INC]] = add i32 [[P]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC]], 0 +; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[BODY]] +; CHECK: end: +; CHECK-NEXT: ret i32 0 +; +entry: + br label %body + +body: + %p = phi i32 [ 1, %entry ], [ %inc, %body ] + %inc = add i32 %p, 1 + %cmp = icmp eq i32 %inc, 0 + br i1 %cmp, label %end, label %body + +end: + ret i32 0 +} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll new file mode 100644 index 0000000000000..f455cf633ff7c --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll @@ -0,0 +1,49 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s + +; FIXME: reversed loads/stores are not supported yet with predicated vectorization. 
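+; A hedged sketch of how the reversed accesses could eventually be handled
+; (illustrative only, assuming the experimental vp.reverse intrinsic): load
+; the lowest-addressed %evl elements contiguously, then reverse the active
+; lanes before the contiguous store; %alltrue stands for an all-true mask:
+;
+;   %v = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr %gepl.lo, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   %rev = call <vscale x 1 x i32> @llvm.experimental.vp.reverse.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   call void @llvm.vp.store.nxv1i32.p0(<vscale x 1 x i32> %rev, ptr %geps.lo, <vscale x 1 x i1> %alltrue, i32 %evl)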
+ +define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %ptr2) { +; CHECK-LABEL: @reverse_load_store( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL:%.*]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; CHECK-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 +; CHECK-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[ADD]] +; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 +; CHECK-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[ADD]] +; CHECK-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 +; CHECK-NEXT: [[INC]] = add i32 [[I]], 1 +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND:%.*]] +; CHECK: loopend: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %add.phi = phi i64 [ %startval, %entry ], [ %add, %for.body ] + %i = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %add = add i64 %add.phi, -1 + %gepl = getelementptr inbounds i32, ptr %ptr, i64 %add + %tmp = load i32, ptr %gepl, align 4 + %geps = getelementptr inbounds i32, ptr %ptr2, i64 %add + store i32 %tmp, ptr %geps, align 4 + %inc = add i32 %i, 1 + %exitcond = icmp ne i32 %inc, 1024 + br i1 %exitcond, label %for.body, label %loopend + +loopend: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll new file mode 100644 index 0000000000000..a3625658fcd69 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll @@ -0,0 +1,98 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=IF-EVL %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s + +; The target does not support predicated vectorization. 
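+; For contrast, a hedged sketch (illustrative only; this target does not take
+; this path) of the EVL-folded vector body this tail-folding mode is designed
+; to emit, with an all-true mask %alltrue and the EVL carrying the predication:
+;
+; vector.body:
+;   %evl.iv = phi i64 [ 0, %vector.ph ], [ %iv.next, %vector.body ]
+;   %avl = sub i64 %N, %evl.iv
+;   %evl = call i32 @llvm.experimental.get_vector_length.i64(i64 %avl, i32 1, i1 true)
+;   %b.gep = getelementptr inbounds i32, ptr %b, i64 %evl.iv
+;   %lhs = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr %b.gep, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   %c.gep = getelementptr inbounds i32, ptr %c, i64 %evl.iv
+;   %rhs = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0(ptr %c.gep, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   %sum = add nsw <vscale x 1 x i32> %rhs, %lhs
+;   %a.gep = getelementptr inbounds i32, ptr %a, i64 %evl.iv
+;   call void @llvm.vp.store.nxv1i32.p0(<vscale x 1 x i32> %sum, ptr %a.gep, <vscale x 1 x i1> %alltrue, i32 %evl)
+;   %evl.zext = zext i32 %evl to i64
+;   %iv.next = add nuw i64 %evl.iv, %evl.zext
+;   %done = icmp eq i64 %iv.next, %N
+;   br i1 %done, label %exit, label %vector.body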
+define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
+; IF-EVL-LABEL: @foo(
+; IF-EVL-NEXT:  entry:
+; IF-EVL-NEXT:    br label [[FOR_BODY:%.*]]
+; IF-EVL:       for.body:
+; IF-EVL-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; IF-EVL-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IV]]
+; IF-EVL-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IV]]
+; IF-EVL-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; IF-EVL-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
+; IF-EVL-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]]
+; IF-EVL-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
+; IF-EVL-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; IF-EVL-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]]
+; IF-EVL-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; IF-EVL:       for.cond.cleanup:
+; IF-EVL-NEXT:    ret void
+;
+; NO-VP-LABEL: @foo(
+; NO-VP-NEXT:  entry:
+; NO-VP-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP0]]
+; NO-VP-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP:       vector.ph:
+; NO-VP-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP1]]
+; NO-VP-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT:    br label [[VECTOR_BODY:%.*]]
+; NO-VP:       vector.body:
+; NO-VP-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; NO-VP-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP3]]
+; NO-VP-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
+; NO-VP-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4
+; NO-VP-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP3]]
+; NO-VP-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
+; NO-VP-NEXT:    [[WIDE_LOAD1:%.*]] = load <vscale x 1 x i32>, ptr [[TMP7]], align 4
+; NO-VP-NEXT:    [[TMP8:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
+; NO-VP-NEXT:    [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP3]]
+; NO-VP-NEXT:    [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0
+; NO-VP-NEXT:    store <vscale x 1 x i32> [[TMP8]], ptr [[TMP10]], align 4
+; NO-VP-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; NO-VP-NEXT:    [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT:    br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP:       middle.block:
+; NO-VP-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; NO-VP:       scalar.ph:
+; NO-VP-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT:    br label [[FOR_BODY:%.*]]
+; NO-VP:       for.body:
+; NO-VP-NEXT:    [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT:    [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
+; NO-VP-NEXT:    [[TMP13:%.*]] = load i32, ptr 
[[ARRAYIDX2]], align 4 +; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP12]] +; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-VP: for.cond.cleanup: +; NO-VP-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/vplan-vp-intrinsics.ll new file mode 100644 index 0000000000000..30e0f055acfc3 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/vplan-vp-intrinsics.ll @@ -0,0 +1,37 @@ +; REQUIRES: asserts + +; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on \ +; RUN: -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP %s + +; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on \ +; RUN: -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP %s + +; The target does not support predicated vectorization. 
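+; On a target that does take the EVL path, the initial VPlan would instead be
+; expected to carry the recipes this patch introduces, schematically (a sketch
+; with illustrative value names; the exact printing is not checked here):
+;
+;   EMIT vp<%evl.phi> = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI
+;   EMIT vp<%evl> = EXPLICIT-VECTOR-LENGTH vp<%evl.phi>, ir<%N>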
+define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +; NO-VP-NOT: EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI + +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} + From e677862fecf7ce7f2b8bc3b15fa02fb49d02f44f Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Mon, 5 Feb 2024 07:59:30 -0800 Subject: [PATCH 02/17] Address comments --- .../llvm/Analysis/TargetTransformInfo.h | 2 +- .../Target/RISCV/RISCVTargetTransformInfo.h | 2 +- .../Transforms/Vectorize/LoopVectorize.cpp | 73 +++++------ llvm/lib/Transforms/Vectorize/VPlan.h | 1 + .../Transforms/Vectorize/VPlanAnalysis.cpp | 2 +- .../lib/Transforms/Vectorize/VPlanRecipes.cpp | 25 ++-- .../Transforms/Vectorize/VPlanTransforms.cpp | 2 +- .../Transforms/Vectorize/VPlanVerifier.cpp | 23 ++-- .../PowerPC/vectorize-vp-intrinsics.ll | 51 ++++++++ .../PowerPC/vplan-vp-intrinsics.ll | 117 ++++++++++++++++++ .../vectorize-vp-intrinsics-interleave.ll | 50 ++++---- 11 files changed, 259 insertions(+), 89 deletions(-) create mode 100644 llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-vp-intrinsics.ll create mode 100644 llvm/test/Transforms/LoopVectorize/PowerPC/vplan-vp-intrinsics.ll diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h index 110a00ef2d70f..fa9392b86c15b 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfo.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h @@ -192,7 +192,7 @@ enum class TailFoldingStyle { /// and such that the scalar epilogue loop can always be removed. DataAndControlFlowWithoutRuntimeCheck, /// Use predicated EVL instructions for tail-folding. - /// Indicates that VP intrinsics should be used if tail-folding is enabled. + /// Indicates that VP intrinsics should be used. DataWithEVL, }; diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h index 5c1d6aab04f3f..c0169ea1ad537 100644 --- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h +++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.h @@ -78,7 +78,7 @@ class RISCVTTIImpl : public BasicTTIImplBase { const APInt &Imm, Type *Ty, TTI::TargetCostKind CostKind); - /// \name Vector Predication Information + /// \name EVL Support for predicated vectorization. /// Whether the target supports the %evl parameter of VP intrinsic efficiently /// in hardware, for the given opcode and type/alignment. 
(see LLVM Language
 /// Reference - "Vector Predication Intrinsics",
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index dc3f8a4f7d989..14855e9c3dd96 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -253,8 +253,7 @@ static cl::opt<TailFoldingStyle> ForceTailFoldingStyle(
                    "data-and-control-without-rt-check",
                    "Similar to data-and-control, but remove the runtime check"),
         clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl",
-                   "Use predicated EVL instructions for tail folding if the "
-                   "target supports vector length predication")));
+                   "Use predicated EVL instructions for tail folding")));
 
 static cl::opt<bool> MaximizeBandwidth(
     "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden,
@@ -1582,18 +1592,7 @@ class LoopVectorizationCostModel {
   /// Returns true if VP intrinsics with explicit vector length support should
   /// be generated in the tail folded loop.
   bool useVPIWithVPEVLVectorization() const {
-    return PreferEVL && !EnableVPlanNativePath &&
-           getTailFoldingStyle() == TailFoldingStyle::DataWithEVL &&
-           // FIXME: implement support for max safe dependency distance.
-           Legal->isSafeForAnyVectorWidth() &&
-           // FIXME: remove this once reductions are supported.
-           Legal->getReductionVars().empty() &&
-           // FIXME: remove this once vp_reverse is supported.
-           none_of(
-               WideningDecisions,
-               [](const std::pair<std::pair<Instruction *, ElementCount>,
-                                  std::pair<InstWidening, InstructionCost>>
-                      &Data) { return Data.second.first == CM_Widen_Reverse; });
+    return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL;
   }
 
   /// Returns true if the Phi is part of an inloop reduction.
@@ -1743,9 +1742,8 @@ class LoopVectorizationCostModel {
-  std::pair<TailFoldingStyle, TailFoldingStyle> ChosenTailFoldingStyle =
-      std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None);
-
-  /// Control whether to generate VP intrinsics with explicit-vector-length
-  /// support in vectorized code.
-  bool PreferEVL = false;
+  /// The tail folding style finally chosen, if any.
+  std::optional<TailFoldingStyle> ChosenTailFoldingStyle;
 
   /// A map holding scalar costs for different vectorization factors. The
   /// presence of a cost for an instruction in the mapping indicates that the
@@ -5327,8 +5325,11 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
     return 1;
 
   // Do not interleave if EVL is preferred and no User IC is specified.
-  if (useVPIWithVPEVLVectorization())
+  if (useVPIWithVPEVLVectorization()) {
+    LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. "
+                         "Unroll factor forced to be 1.\n");
     return 1;
+  }
 
   // We used the distance for the interleave count.
if (!Legal->isSafeForAnyVectorWidth()) @@ -9387,17 +9388,17 @@ void VPReplicateRecipe::execute(VPTransformState &State) { static Instruction * lowerStoreUsingVectorIntrinsics(IRBuilderBase &Builder, Value *Addr, Value *StoredVal, bool IsScatter, Value *Mask, - Value *EVLPart, const Align &Alignment) { + Value *EVL, const Align &Alignment) { CallInst *Call; if (IsScatter) { - Call = Builder.CreateIntrinsic(Type::getVoidTy(EVLPart->getContext()), + Call = Builder.CreateIntrinsic(Type::getVoidTy(EVL->getContext()), Intrinsic::vp_scatter, - {StoredVal, Addr, Mask, EVLPart}); + {StoredVal, Addr, Mask, EVL}); } else { VectorBuilder VBuilder(Builder); - VBuilder.setEVL(EVLPart).setMask(Mask); + VBuilder.setEVL(EVL).setMask(Mask); Call = cast(VBuilder.createVectorInstruction( - Instruction::Store, Type::getVoidTy(EVLPart->getContext()), + Instruction::Store, Type::getVoidTy(EVL->getContext()), {StoredVal, Addr})); } Call->addParamAttr( @@ -9410,16 +9411,16 @@ lowerStoreUsingVectorIntrinsics(IRBuilderBase &Builder, Value *Addr, static Instruction *lowerLoadUsingVectorIntrinsics(IRBuilderBase &Builder, VectorType *DataTy, Value *Addr, bool IsGather, - Value *Mask, Value *EVLPart, + Value *Mask, Value *EVL, const Align &Alignment) { CallInst *Call; if (IsGather) { - Call = Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, - {Addr, Mask, EVLPart}, nullptr, - "wide.masked.gather"); + Call = + Builder.CreateIntrinsic(DataTy, Intrinsic::vp_gather, {Addr, Mask, EVL}, + nullptr, "wide.masked.gather"); } else { VectorBuilder VBuilder(Builder); - VBuilder.setEVL(EVLPart).setMask(Mask); + VBuilder.setEVL(EVL).setMask(Mask); Call = cast(VBuilder.createVectorInstruction( Instruction::Load, DataTy, Addr, "vp.op.load")); } @@ -9467,9 +9468,9 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { Instruction *NewSI = nullptr; Value *StoredVal = State.get(StoredValue, Part); if (State.EVL) { - assert(State.UF == 1 && - "Expected only UF==1 for predicated vectorization."); - Value *EVLPart = State.get(State.EVL, Part); + assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with " + "explicit vector length."); + Value *EVL = State.get(State.EVL, VPIteration(0, 0)); // If EVL is not nullptr, then EVL must be a valid value set during plan // creation, possibly default value = whole vector register length. EVL // is created only if TTI prefers predicated vectorization, thus if EVL @@ -9479,7 +9480,7 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; NewSI = lowerStoreUsingVectorIntrinsics( Builder, State.get(getAddr(), Part), StoredVal, CreateGatherScatter, - MaskPart, EVLPart, Alignment); + MaskPart, EVL, Alignment); } else if (CreateGatherScatter) { Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; Value *VectorGep = State.get(getAddr(), Part); @@ -9511,9 +9512,9 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { for (unsigned Part = 0; Part < State.UF; ++Part) { Value *NewLI; if (State.EVL) { - assert(State.UF == 1 && - "Expected only UF==1 for predicated vectorization."); - Value *EVLPart = State.get(State.EVL, Part); + assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with " + "explicit vector length."); + Value *EVL = State.get(State.EVL, VPIteration(0, 0)); // If EVL is not nullptr, then EVL must be a valid value set during plan // creation, possibly default value = whole vector register length. 
EVL // is created only if TTI prefers predicated vectorization, thus if EVL @@ -9523,7 +9524,7 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; NewLI = lowerLoadUsingVectorIntrinsics( Builder, DataTy, State.get(getAddr(), Part), CreateGatherScatter, - MaskPart, EVLPart, Alignment); + MaskPart, EVL, Alignment); } else if (CreateGatherScatter) { Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; Value *VectorGep = State.get(getAddr(), Part); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 434f4e220c478..09fd3bdf795b4 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -2518,6 +2518,7 @@ class VPEVLBasedIVPHIRecipe : public VPHeaderPHIRecipe { } /// Generate phi for handling IV based on EVL over iterations correctly. + // TODO: investigate if it can share the code with VPCanonicalIVPHIRecipe. void execute(VPTransformState &State) override; /// Returns true if the recipe only uses the first lane of operand \p Op. diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 72bf1d402cf97..c8ae2ee5a30fe 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -218,7 +218,7 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) { .Case([this](const auto *R) { - // Handle header phi recipes, except VPWienIntOrFpInduction + // Handle header phi recipes, except VPWidenIntOrFpInduction // which needs special handling due it being possibly truncated. // TODO: consider inferring/caching type of siblings, e.g., // backedge value, here and in cases below. diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index a45062a61ecc5..b9f86d47706a4 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -388,40 +388,41 @@ Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) { } case VPInstruction::ExplicitVectorLength: { // Compute EVL - auto GetSetVL = [=](VPTransformState &State, Value *EVL) { - assert(EVL->getType()->isIntegerTy() && + auto GetSetVL = [=](VPTransformState &State, Value *AVL) { + assert(AVL->getType()->isIntegerTy() && "Requested vector length should be an integer."); // TODO: Add support for MaxSafeDist for correct loop emission. assert(State.VF.isScalable() && "Expected scalable vector factor."); Value *VFArg = State.Builder.getInt32(State.VF.getKnownMinValue()); - Value *GVL = State.Builder.CreateIntrinsic( + Value *EVL = State.Builder.CreateIntrinsic( State.Builder.getInt32Ty(), Intrinsic::experimental_get_vector_length, - {EVL, VFArg, State.Builder.getTrue()}); - return GVL; + {AVL, VFArg, State.Builder.getTrue()}); + return EVL; }; // TODO: Restructure this code with an explicit remainder loop, vsetvli can // be outside of the main loop. assert(Part == 0 && "No unrolling expected for predicated vectorization."); // Compute VTC - IV as the EVL(requested vector length). 
-    Value *Index = State.get(getOperand(0), 0);
+    Value *Index = State.get(getOperand(0), VPIteration(0, 0));
     Value *TripCount = State.get(getOperand(1), VPIteration(0, 0));
-    Value *EVL = State.Builder.CreateSub(TripCount, Index);
-    Value *SetVL = GetSetVL(State, EVL);
+    Value *AVL = State.Builder.CreateSub(TripCount, Index);
+    Value *EVL = GetSetVL(State, AVL);
     assert(!State.EVL && "multiple EVL recipes");
     State.EVL = this;
-    return SetVL;
+    return EVL;
   }
   // TODO: remove this once a regular Add VPInstruction is supported.
   case VPInstruction::ExplicitVectorLengthIVIncrement: {
     assert(Part == 0 && "Expected unroll factor 1 for VP vectorization.");
-    Value *Phi = State.get(getOperand(0), VPIteration(Part, 0));
-    Value *EVL = State.get(getOperand(1), VPIteration(Part, 0));
+    Value *Phi = State.get(getOperand(0), VPIteration(0, 0));
+    Value *EVL = State.get(getOperand(1), VPIteration(0, 0));
     assert(EVL->getType() == Phi->getType() &&
            "EVL and Phi must have the same type.");
     return Builder.CreateAdd(Phi, EVL, Name, hasNoUnsignedWrap(),
                              hasNoSignedWrap());
   }
   case VPInstruction::CanonicalIVIncrementForPart: {
     auto *IV = State.get(getOperand(0), VPIteration(0, 0));
@@ -629,6 +630,8 @@ bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const {
     // TODO: Cover additional opcodes.
     return vputils::onlyFirstLaneUsed(this);
   case VPInstruction::ActiveLaneMask:
+  case VPInstruction::ExplicitVectorLength:
+  case VPInstruction::ExplicitVectorLengthIVIncrement:
   case VPInstruction::CalculateTripCountMinusVF:
   case VPInstruction::CanonicalIVIncrementForPart:
   case VPInstruction::BranchOnCount:
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 8ba8019834017..2990aa8160886 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -1342,7 +1342,7 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) {
   EVLPhi->addOperand(NextEVLIV);
 
   // Replace all uses of VPCanonicalIVPHIRecipe by
-  // VPEVLBasedIVPHIRecipe except for VPInstruction::CanonicalIVIncrement.
+  // VPEVLBasedIVPHIRecipe except for the canonical IV increment.
   CanonicalIVPHI->replaceAllUsesWith(EVLPhi);
   CanonicalIVIncrement->setOperand(0, CanonicalIVPHI);
   // TODO: support unroll factor > 1.
diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
index 364e9cda4fa04..37ea02b302102 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp
@@ -92,26 +92,26 @@ static bool verifyVPBasicBlock(const VPBasicBlock *VPBB,
   for (const VPRecipeBase &R : *VPBB)
     RecipeNumbering[&R] = Cnt++;
 
-  // Check if EVL recipes exist only in Entry block and only once.
+  // Set of recipe types, along with VPInstruction opcodes, of all EVL-related
+  // recipes that must appear at most once in Entry or Exiting blocks.
  DenseSet<unsigned> EVLFound;
-  const VPBlockBase *Header = nullptr;
-  const VPBlockBase *Exit = nullptr;
+  bool IsHeader = false;
+  bool IsExiting = false;
   const VPlan *Plan = VPBB->getPlan();
   if (Plan && Plan->getEntry()->getNumSuccessors() == 1) {
-    Header = Plan->getVectorLoopRegion()->getEntry();
-    Exit = Plan->getVectorLoopRegion()->getExiting();
+    IsHeader = Plan->getVectorLoopRegion()->getEntry() == VPBB;
+    IsExiting = Plan->getVectorLoopRegion()->getExiting() == VPBB;
   }
   auto CheckEVLRecipiesInsts = [&](const VPRecipeBase *R) {
     if (isa<VPEVLBasedIVPHIRecipe>(R)) {
-      if (!Header || VPBB != Header) {
+      if (!IsHeader) {
         errs() << "EVL PHI recipe not in entry block!\n";
         return false;
       }
-      if (EVLFound.contains(VPDef::VPEVLBasedIVPHISC)) {
+      if (!EVLFound.insert(VPDef::VPEVLBasedIVPHISC).second) {
         errs() << "EVL PHI recipe inserted more than once!\n";
         return false;
       }
-      EVLFound.insert(VPDef::VPEVLBasedIVPHISC);
       return true;
     }
     auto *RInst = dyn_cast<VPInstruction>(R);
     if (!RInst)
       return true;
     switch (RInst->getOpcode()) {
     case VPInstruction::ExplicitVectorLength:
-      if (!Header || VPBB != Header) {
+      if (!IsHeader) {
         errs() << "EVL instruction not in entry block!\n";
         return false;
       }
       break;
     case VPInstruction::ExplicitVectorLengthIVIncrement:
-      if (!Exit || VPBB != Exit) {
+      if (!IsExiting) {
         errs() << "EVL inc instruction not in exit block!\n";
         return false;
       }
       break;
     default:
       return true;
     }
-    if (EVLFound.contains(RInst->getOpcode() + VPDef::VPLastPHISC)) {
+    if (!EVLFound.insert(RInst->getOpcode() + VPDef::VPLastPHISC).second) {
       errs() << "EVL instruction inserted more than once!\n";
       return false;
     }
-    EVLFound.insert(RInst->getOpcode() + VPDef::VPLastPHISC);
     return true;
   };
diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-vp-intrinsics.ll
new file mode 100644
index 0000000000000..2ce2a45a811ab
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-vp-intrinsics.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=loop-vectorize \
+; RUN: -force-tail-folding-style=data-with-evl \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN: -mcpu=pwr10 -S < %s | FileCheck %s
+
+; RUN: opt -passes=loop-vectorize \
+; RUN: -force-tail-folding-style=none \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN: -mcpu=pwr10 -S < %s | FileCheck %s
+
+define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[IV]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[IV]]
+; CHECK-NEXT:    store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    
[[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]] +; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] +; CHECK: for.cond.cleanup: +; CHECK-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vplan-vp-intrinsics.ll new file mode 100644 index 0000000000000..5d1a471c5d166 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/vplan-vp-intrinsics.ll @@ -0,0 +1,117 @@ +; REQUIRES: asserts + +; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=powerpc64le-unknown-linux-gnu \ +; RUN: -mcpu=pwr10 -disable-output < %s 2>&1 | FileCheck %s + +define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { +; CHECK-LABEL: VPlan 'Initial VPlan for VF={2,4},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF * UF +; CHECK-NEXT: Live-in vp<%1> = vector-trip-count +; CHECK-NEXT: Live-in vp<%2> = backedge-taken count +; CHECK-NEXT: Live-in ir<%N> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%3> = CANONICAL-INDUCTION ir<0>, vp<%16> +; CHECK-NEXT: WIDEN-INDUCTION %iv = phi 0, %iv.next, ir<1> +; CHECK-NEXT: EMIT vp<%5> = icmp ule ir<%iv>, vp<%2> +; CHECK-NEXT: Successor(s): pred.store +; CHECK-EMPTY: +; CHECK-NEXT: pred.store: { +; CHECK-NEXT: pred.store.entry: +; CHECK-NEXT: BRANCH-ON-MASK vp<%5> +; CHECK-NEXT: Successor(s): pred.store.if, pred.store.continue +; CHECK-EMPTY: +; CHECK-NEXT: pred.store.if: +; CHECK-NEXT: vp<%6> = SCALAR-STEPS vp<%3>, ir<1> +; CHECK-NEXT: REPLICATE ir<%arrayidx> = getelementptr inbounds ir<%b>, vp<%6> +; CHECK-NEXT: REPLICATE ir<%0> = load ir<%arrayidx> +; CHECK-NEXT: REPLICATE ir<%arrayidx2> = getelementptr inbounds ir<%c>, vp<%6> +; CHECK-NEXT: REPLICATE ir<%1> = load ir<%arrayidx2> +; CHECK-NEXT: REPLICATE ir<%arrayidx4> = getelementptr inbounds ir<%a>, vp<%6> +; CHECK-NEXT: REPLICATE ir<%add> = add nsw ir<%1>, ir<%0> +; CHECK-NEXT: REPLICATE store ir<%add>, ir<%arrayidx4> +; CHECK-NEXT: Successor(s): pred.store.continue +; CHECK-EMPTY: +; CHECK-NEXT: pred.store.continue: +; CHECK-NEXT: PHI-PREDICATED-INSTRUCTION vp<%14> = ir<%0> +; CHECK-NEXT: PHI-PREDICATED-INSTRUCTION vp<%15> = ir<%1> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; CHECK-NEXT: Successor(s): for.body.2 +; CHECK-EMPTY: +; CHECK-NEXT: for.body.2: +; CHECK-NEXT: EMIT vp<%16> = add vp<%3>, vp<%0> +; CHECK-NEXT: EMIT branch-on-count vp<%16>, vp<%1> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv + %0 = load i32, ptr %arrayidx, 
align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body + +for.cond.cleanup: + ret void +} + +define void @safe_dep(ptr %p) { +; CHECK-LABEL: VPlan 'Initial VPlan for VF={2},UF>=1' { +; CHECK-NEXT: Live-in vp<%0> = VF * UF +; CHECK-NEXT: Live-in vp<%1> = vector-trip-count +; CHECK-NEXT: Live-in ir<512> = original trip-count +; CHECK-EMPTY: +; CHECK-NEXT: vector.ph: +; CHECK-NEXT: Successor(s): vector loop +; CHECK-EMPTY: +; CHECK-NEXT: vector loop: { +; CHECK-NEXT: vector.body: +; CHECK-NEXT: EMIT vp<%2> = CANONICAL-INDUCTION ir<0>, vp<%10> +; CHECK-NEXT: vp<%3> = SCALAR-STEPS vp<%2>, ir<1> +; CHECK-NEXT: CLONE ir<%a1> = getelementptr ir<%p>, vp<%3> +; CHECK-NEXT: vp<%5> = vector-pointer ir<%a1> +; CHECK-NEXT: WIDEN ir<%v> = load vp<%5> +; CHECK-NEXT: CLONE ir<%offset> = add vp<%3>, ir<100> +; CHECK-NEXT: CLONE ir<%a2> = getelementptr ir<%p>, ir<%offset> +; CHECK-NEXT: vp<%9> = vector-pointer ir<%a2> +; CHECK-NEXT: WIDEN store vp<%9>, ir<%v> +; CHECK-NEXT: EMIT vp<%10> = add nuw vp<%2>, vp<%0> +; CHECK-NEXT: EMIT branch-on-count vp<%10>, vp<%1> +; CHECK-NEXT: No successors +; CHECK-NEXT: } +; +entry: + br label %loop + +loop: + %iv = phi i64 [0, %entry], [%iv.next, %loop] + %a1 = getelementptr i64, ptr %p, i64 %iv + %v = load i64, ptr %a1, align 32 + %offset = add i64 %iv, 100 + %a2 = getelementptr i64, ptr %p, i64 %offset + store i64 %v, ptr %a2, align 32 + %iv.next = add i64 %iv, 1 + %cmp = icmp ne i64 %iv, 511 + br i1 %cmp, label %loop, label %exit + +exit: + ret void +} + diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll index 9d0ae16c265da..5f528e8cd9ebf 100644 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll @@ -21,39 +21,37 @@ define void @interleave(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) ; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: -; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ] -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add i64 [[INDEX]], 0 -; IF-EVL-NEXT: [[VEC_IV1:%.*]] = add i64 [[INDEX]], 1 -; IF-EVL-NEXT: [[TMP0:%.*]] = icmp ule i64 [[VEC_IV]], [[TRIP_COUNT_MINUS_1]] -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV1]], [[TRIP_COUNT_MINUS_1]] -; IF-EVL-NEXT: br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] +; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] +; IF-EVL-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; IF-EVL-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 +; IF-EVL-NEXT: [[TMP2:%.*]] = icmp ule i64 [[TMP0]], [[TRIP_COUNT_MINUS_1]] +; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ule i64 [[TMP1]], [[TRIP_COUNT_MINUS_1]] +; IF-EVL-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] ; IF-EVL: pred.store.if: -; IF-EVL-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 -; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP2]] -; IF-EVL-NEXT: [[TMP4:%.*]] = 
load i32, ptr [[TMP3]], align 4 -; IF-EVL-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP2]] -; IF-EVL-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4 -; IF-EVL-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP2]] -; IF-EVL-NEXT: [[TMP8:%.*]] = add nsw i32 [[TMP6]], [[TMP4]] -; IF-EVL-NEXT: store i32 [[TMP8]], ptr [[TMP7]], align 4 +; IF-EVL-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; IF-EVL-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 +; IF-EVL-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP0]] +; IF-EVL-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 +; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; IF-EVL-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP5]] +; IF-EVL-NEXT: store i32 [[TMP9]], ptr [[TMP8]], align 4 ; IF-EVL-NEXT: br label [[PRED_STORE_CONTINUE]] ; IF-EVL: pred.store.continue: -; IF-EVL-NEXT: [[TMP9:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP4]], [[PRED_STORE_IF]] ] -; IF-EVL-NEXT: [[TMP10:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP6]], [[PRED_STORE_IF]] ] -; IF-EVL-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]] -; IF-EVL: pred.store.if2: -; IF-EVL-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 1 -; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP11]] +; IF-EVL-NEXT: [[TMP10:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP5]], [[PRED_STORE_IF]] ] +; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP7]], [[PRED_STORE_IF]] ] +; IF-EVL-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] +; IF-EVL: pred.store.if1: +; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]] ; IF-EVL-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 -; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP11]] +; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP1]] ; IF-EVL-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 -; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP11]] +; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] ; IF-EVL-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP15]], [[TMP13]] ; IF-EVL-NEXT: store i32 [[TMP17]], ptr [[TMP16]], align 4 -; IF-EVL-NEXT: br label [[PRED_STORE_CONTINUE3]] -; IF-EVL: pred.store.continue3: -; IF-EVL-NEXT: [[TMP18:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP13]], [[PRED_STORE_IF2]] ] -; IF-EVL-NEXT: [[TMP19:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP15]], [[PRED_STORE_IF2]] ] +; IF-EVL-NEXT: br label [[PRED_STORE_CONTINUE2]] +; IF-EVL: pred.store.continue2: +; IF-EVL-NEXT: [[TMP18:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP13]], [[PRED_STORE_IF1]] ] +; IF-EVL-NEXT: [[TMP19:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP15]], [[PRED_STORE_IF1]] ] ; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 ; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] From 26b944cee4a6a1bb5e39b088b5c80db1bff9167e Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Wed, 7 Feb 2024 09:26:24 -0800 Subject: [PATCH 03/17] Adjust comments, lambda name --- llvm/lib/Transforms/Vectorize/VPlan.h | 2 +- .../lib/Transforms/Vectorize/VPlanRecipes.cpp | 4 +- 
.../Transforms/Vectorize/VPlanTransforms.cpp | 44 +++++++++---------- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 09fd3bdf795b4..bba955f499672 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -2518,7 +2518,7 @@ class VPEVLBasedIVPHIRecipe : public VPHeaderPHIRecipe { } /// Generate phi for handling IV based on EVL over iterations correctly. - // TODO: investigate if it can share the code with VPCanonicalIVPHIRecipe. + /// TODO: investigate if it can share the code with VPCanonicalIVPHIRecipe. void execute(VPTransformState &State) override; /// Returns true if the recipe only uses the first lane of operand \p Op. diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index b9f86d47706a4..f6e38952ebbf8 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -388,7 +388,7 @@ Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) { } case VPInstruction::ExplicitVectorLength: { // Compute EVL - auto GetSetVL = [=](VPTransformState &State, Value *AVL) { + auto GetEVL = [=](VPTransformState &State, Value *AVL) { assert(AVL->getType()->isIntegerTy() && "Requested vector length should be an integer."); @@ -408,7 +408,7 @@ Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) { Value *Index = State.get(getOperand(0), VPIteration(0, 0)); Value *TripCount = State.get(getOperand(1), VPIteration(0, 0)); Value *AVL = State.Builder.CreateSub(TripCount, Index); - Value *EVL = GetSetVL(State, AVL); + Value *EVL = GetEVL(State, AVL); assert(!State.EVL && "multiple EVL recipes"); State.EVL = this; return EVL; diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 2990aa8160886..094ee8b365eb5 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1280,28 +1280,28 @@ void VPlanTransforms::addActiveLaneMask( replaceHeaderPredicateWithIdiom(Plan, *LaneMask); } -// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and -// replaces all uses except the canonical IV increment of VPCanonicalIVPHIRecipe -// with a VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe is used only -// for loop iterations counting after this transformation. -// -// The function uses the following definitions: -// %StartV is the canonical induction start value. -// -// The function adds the following recipes: -// -// vector.ph: -// ... -// -// vector.body: -// ... -// %P = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ], [ %NextEVL, -// %vector.body ] -// %EVL = EXPLICIT-VECTOR-LENGTH %P, original TC -// ... -// %NextEVL = EXPLICIT-VECTOR-LENGTH + %P, %EVL -// ... -// +/// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and +/// replaces all uses except the canonical IV increment of VPCanonicalIVPHIRecipe +/// with a VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe is used only +/// for loop iterations counting after this transformation. +/// +/// The function uses the following definitions: +/// %StartV is the canonical induction start value. +/// +/// The function adds the following recipes: +/// +/// vector.ph: +/// ... +/// +/// vector.body: +/// ... 
+/// %P = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ], [ %NextEVL, +/// %vector.body ] +/// %EVL = EXPLICIT-VECTOR-LENGTH %P, original TC +/// ... +/// %NextEVL = EXPLICIT-VECTOR-LENGTH + %P, %EVL +/// ... +/// void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { VPBasicBlock *Header = Plan.getVectorLoopRegion()->getEntryBasicBlock(); auto *CanonicalIVPHI = Plan.getCanonicalIV(); From ce9cc439b05fbe04429d8f61a42523a9f022b166 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Wed, 7 Feb 2024 09:50:22 -0800 Subject: [PATCH 04/17] Fix formatting --- llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 094ee8b365eb5..c04868396f26a 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1281,9 +1281,9 @@ void VPlanTransforms::addActiveLaneMask( } /// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and -/// replaces all uses except the canonical IV increment of VPCanonicalIVPHIRecipe -/// with a VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe is used only -/// for loop iterations counting after this transformation. +/// replaces all uses except the canonical IV increment of +/// VPCanonicalIVPHIRecipe with a VPEVLBasedIVPHIRecipe. VPCanonicalIVPHIRecipe +/// is used only for loop iterations counting after this transformation. /// /// The function uses the following definitions: /// %StartV is the canonical induction start value. @@ -1295,9 +1295,8 @@ void VPlanTransforms::addActiveLaneMask( /// /// vector.body: /// ... -/// %P = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ], [ %NextEVL, -/// %vector.body ] -/// %EVL = EXPLICIT-VECTOR-LENGTH %P, original TC +/// %P = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ], [ +/// %NextEVL, %vector.body ] %EVL = EXPLICIT-VECTOR-LENGTH %P, original TC /// ... /// %NextEVL = EXPLICIT-VECTOR-LENGTH + %P, %EVL /// ... From 0611c30edbb19b321f6a3d450b059685663f743d Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Fri, 16 Feb 2024 06:10:19 -0800 Subject: [PATCH 05/17] Rebase --- llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index c04868396f26a..3d640097f6e43 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1371,9 +1371,7 @@ void VPlanTransforms::dropPoisonGeneratingRecipes( // handled. 
if (isa(CurRec) || isa(CurRec) || - isa(CurRec) || - isa(CurRec) || - isa(CurRec)) + isa(CurRec) || isa(CurRec)) continue; // This recipe contributes to the address computation of a widen From 6914229908506e862dd1328f6199c57ef4025822 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Wed, 28 Feb 2024 08:39:29 -0800 Subject: [PATCH 06/17] Rebase --- .../Transforms/Vectorize/LoopVectorize.cpp | 22 +-- llvm/lib/Transforms/Vectorize/VPlan.cpp | 16 ++- .../lib/Transforms/Vectorize/VPlanRecipes.cpp | 2 +- .../LoopVectorize/RISCV/inloop-reduction.ll | 12 +- .../RISCV/vectorize-vp-intrinsics.ll | 2 +- .../RISCV/vplan-vp-intrinsics.ll | 2 +- .../X86/vectorize-vp-intrinsics.ll | 128 +++++++++++++----- .../LoopVectorize/X86/vplan-vp-intrinsics.ll | 2 +- .../vectorize-vp-intrinsics-interleave.ll | 2 +- .../vectorize-vp-intrinsics-iv32.ll | 2 +- .../LoopVectorize/vectorize-vp-intrinsics.ll | 2 +- 11 files changed, 122 insertions(+), 70 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 14855e9c3dd96..3d6ea9e383c66 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1522,7 +1522,6 @@ class LoopVectorizationCostModel { if (!Legal->prepareToFoldTailByMasking()) return; -<<<<<<< HEAD if (ForceTailFoldingStyle.getNumOccurrences()) { ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = ForceTailFoldingStyle; @@ -1558,16 +1557,6 @@ class LoopVectorizationCostModel { } return; } -||||||| parent of 73e3b4948fb4 (Address comments) - if (ForceTailFoldingStyle.getNumOccurrences()) - return ForceTailFoldingStyle; -======= - if (ChosenTailFoldingStyle) - return *ChosenTailFoldingStyle; - - if (ForceTailFoldingStyle.getNumOccurrences()) - return ForceTailFoldingStyle; ->>>>>>> 73e3b4948fb4 (Address comments) ChosenTailFoldingStyle.first = TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true); @@ -1742,9 +1731,6 @@ class LoopVectorizationCostModel { std::pair ChosenTailFoldingStyle = std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None); - /// Control finally chosen tail folding style. - std::optional ChosenTailFoldingStyle; - /// A map holding scalar costs for different vectorization factors. The /// presence of a cost for an instruction in the mapping indicates that the /// instruction will be scalarized when vectorizing with the associated @@ -9479,8 +9465,8 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { // FIXME: Support reverse store after vp_reverse is added. Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; NewSI = lowerStoreUsingVectorIntrinsics( - Builder, State.get(getAddr(), Part), StoredVal, CreateGatherScatter, - MaskPart, EVL, Alignment); + Builder, State.get(getAddr(), Part, !CreateGatherScatter), + StoredVal, CreateGatherScatter, MaskPart, EVL, Alignment); } else if (CreateGatherScatter) { Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; Value *VectorGep = State.get(getAddr(), Part); @@ -9523,8 +9509,8 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { // FIXME: Support reverse loading after vp_reverse is added. Value *MaskPart = isMaskRequired ? 
BlockInMaskParts[Part] : nullptr; NewLI = lowerLoadUsingVectorIntrinsics( - Builder, DataTy, State.get(getAddr(), Part), CreateGatherScatter, - MaskPart, EVL, Alignment); + Builder, DataTy, State.get(getAddr(), Part, !CreateGatherScatter), + CreateGatherScatter, MaskPart, EVL, Alignment); } else if (CreateGatherScatter) { Value *MaskPart = isMaskRequired ? BlockInMaskParts[Part] : nullptr; Value *VectorGep = State.get(getAddr(), Part); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index f0b7008992d7b..8ebd75da34654 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -871,13 +871,15 @@ void VPlan::execute(VPTransformState *State) { // only a single part is generated, which provides the last part from the // previous iteration. For non-ordered reductions all UF parts are // generated. - bool SinglePartNeeded = isa(PhiR) || - isa(PhiR) || - (isa(PhiR) && - cast(PhiR)->isOrdered()); - bool NeedsScalar = isa(PhiR) || - (isa(PhiR) && - cast(PhiR)->isInLoop()); + bool SinglePartNeeded = + isa(PhiR) || + isa(PhiR) || + (isa(PhiR) && + cast(PhiR)->isOrdered()); + bool NeedsScalar = + isa(PhiR) || + (isa(PhiR) && + cast(PhiR)->isInLoop()); unsigned LastPartForNewPhi = SinglePartNeeded ? 1 : State->UF; for (unsigned Part = 0; Part < LastPartForNewPhi; ++Part) { diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index f6e38952ebbf8..d5f310e8ea76f 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -2029,7 +2029,7 @@ void VPEVLBasedIVPHIRecipe::execute(VPTransformState &State) { State.Builder.CreatePHI(Start->getType(), 2, "evl.based.iv"); EntryPart->addIncoming(Start, VectorPH); EntryPart->setDebugLoc(getDebugLoc()); - State.set(this, EntryPart, 0); + State.set(this, EntryPart, 0, /*IsScalar=*/true); } #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll index dc2298ed21dfc..b876e9d2c1a5c 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll @@ -133,21 +133,21 @@ define i32 @add_i16_i32(ptr nocapture readonly %x, i32 %n) { ; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP1]] ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i32 [[N]], 1 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; IF-EVL-NEXT: [[TMP5:%.*]] = call i32 @llvm.vscale.i32() ; IF-EVL-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], 4 +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[VEC_PHI:%.*]] = phi [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP7:%.*]] = add i32 [[INDEX]], 0 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement poison, i32 [[INDEX]], i64 0 -; IF-EVL-NEXT: 
[[BROADCAST_SPLAT2:%.*]] = shufflevector [[BROADCAST_SPLATINSERT1]], poison, zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i32 [[INDEX]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; IF-EVL-NEXT: [[TMP8:%.*]] = call @llvm.experimental.stepvector.nxv4i32() ; IF-EVL-NEXT: [[TMP9:%.*]] = add zeroinitializer, [[TMP8]] -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add [[BROADCAST_SPLAT2]], [[TMP9]] -; IF-EVL-NEXT: [[TMP10:%.*]] = icmp ule [[VEC_IV]], [[BROADCAST_SPLAT]] +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add [[BROADCAST_SPLAT]], [[TMP9]] +; IF-EVL-NEXT: [[TMP10:%.*]] = icmp ule [[VEC_IV]], [[BROADCAST_SPLAT2]] ; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i16, ptr [[X:%.*]], i32 [[TMP7]] ; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i16, ptr [[TMP11]], i32 0 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv4i16.p0(ptr [[TMP12]], i32 2, [[TMP10]], poison) diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll index 60d61f25e9f0c..c69bb17f698aa 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll @@ -6,7 +6,7 @@ ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll index 9f03d945acead..b0417c61b1c4b 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll @@ -7,7 +7,7 @@ ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \ ; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP,CHECK %s define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll index cc9711f869d5e..1cf71360adf72 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll @@ -6,7 +6,7 @@ ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=x86_64 -mattr=+avx512f -S < %s 2>&1 | FileCheck --check-prefix=NO-VP %s define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { @@ -18,16 +18,16 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16 ; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: 
[[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT1]], <16 x i64> poison, <16 x i32> zeroinitializer ; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] ; IF-EVL: vector.body: ; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; IF-EVL-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i64> poison, i64 [[INDEX]], i64 0 -; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT1]], <16 x i64> poison, <16 x i32> zeroinitializer -; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <16 x i64> [[BROADCAST_SPLAT2]], -; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <16 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]] +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[INDEX]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer +; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <16 x i64> [[BROADCAST_SPLAT]], +; IF-EVL-NEXT: [[TMP1:%.*]] = icmp ule <16 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]] ; IF-EVL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] ; IF-EVL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 ; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <16 x i32> @llvm.masked.load.v16i32.p0(ptr [[TMP3]], i32 4, <16 x i1> [[TMP1]], <16 x i32> poison) @@ -62,47 +62,111 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: ret void ; ; NO-VP-LABEL: @foo( -; NO-VP-NEXT: entry: -; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 16 -; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP-NEXT: iter.check: +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 8 +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; NO-VP: vector.main.loop.iter.check: +; NO-VP-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], 64 +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; NO-VP: vector.ph: -; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16 +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 64 ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] ; NO-VP: vector.body: ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; NO-VP-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP2]], align 4 -; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP0]] -; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 -; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i32>, ptr [[TMP4]], align 4 -; NO-VP-NEXT: [[TMP5:%.*]] = add nsw <16 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]] -; NO-VP-NEXT: [[TMP6:%.*]] = 
getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] -; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0 -; NO-VP-NEXT: store <16 x i32> [[TMP5]], ptr [[TMP7]], align 4 -; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; NO-VP-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; NO-VP-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 16 +; NO-VP-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 32 +; NO-VP-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 48 +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]] +; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP2]] +; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 +; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 16 +; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 32 +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 48 +; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i32>, ptr [[TMP8]], align 4 +; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i32>, ptr [[TMP9]], align 4 +; NO-VP-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i32>, ptr [[TMP10]], align 4 +; NO-VP-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i32>, ptr [[TMP11]], align 4 +; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP1]] +; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP2]] +; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 0 +; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 16 +; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 32 +; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i32 48 +; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i32>, ptr [[TMP16]], align 4 +; NO-VP-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i32>, ptr [[TMP17]], align 4 +; NO-VP-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i32>, ptr [[TMP18]], align 4 +; NO-VP-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i32>, ptr [[TMP19]], align 4 +; NO-VP-NEXT: [[TMP20:%.*]] = add nsw <16 x i32> [[WIDE_LOAD5]], [[WIDE_LOAD]] +; NO-VP-NEXT: [[TMP21:%.*]] = add nsw <16 x i32> [[WIDE_LOAD6]], [[WIDE_LOAD2]] +; NO-VP-NEXT: [[TMP22:%.*]] = add nsw <16 x i32> [[WIDE_LOAD7]], [[WIDE_LOAD3]] +; NO-VP-NEXT: [[TMP23:%.*]] = add nsw <16 x i32> [[WIDE_LOAD8]], [[WIDE_LOAD4]] +; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] +; NO-VP-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP2]] +; NO-VP-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP3]] +; NO-VP-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 0 +; NO-VP-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 16 +; NO-VP-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 32 +; NO-VP-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 48 +; NO-VP-NEXT: store <16 x i32> [[TMP20]], ptr [[TMP28]], align 4 +; 
NO-VP-NEXT: store <16 x i32> [[TMP21]], ptr [[TMP29]], align 4 +; NO-VP-NEXT: store <16 x i32> [[TMP22]], ptr [[TMP30]], align 4 +; NO-VP-NEXT: store <16 x i32> [[TMP23]], ptr [[TMP31]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64 +; NO-VP-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; NO-VP: middle.block: ; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] -; NO-VP: scalar.ph: -; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] +; NO-VP: vec.epilog.iter.check: +; NO-VP-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 8 +; NO-VP-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] +; NO-VP: vec.epilog.ph: +; NO-VP-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] +; NO-VP-NEXT: [[N_MOD_VF9:%.*]] = urem i64 [[N]], 8 +; NO-VP-NEXT: [[N_VEC10:%.*]] = sub i64 [[N]], [[N_MOD_VF9]] +; NO-VP-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] +; NO-VP: vec.epilog.vector.body: +; NO-VP-NEXT: [[INDEX12:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT15:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP33:%.*]] = add i64 [[INDEX12]], 0 +; NO-VP-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP33]] +; NO-VP-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[TMP34]], i32 0 +; NO-VP-NEXT: [[WIDE_LOAD13:%.*]] = load <8 x i32>, ptr [[TMP35]], align 4 +; NO-VP-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP33]] +; NO-VP-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[TMP36]], i32 0 +; NO-VP-NEXT: [[WIDE_LOAD14:%.*]] = load <8 x i32>, ptr [[TMP37]], align 4 +; NO-VP-NEXT: [[TMP38:%.*]] = add nsw <8 x i32> [[WIDE_LOAD14]], [[WIDE_LOAD13]] +; NO-VP-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP33]] +; NO-VP-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[TMP39]], i32 0 +; NO-VP-NEXT: store <8 x i32> [[TMP38]], ptr [[TMP40]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT15]] = add nuw i64 [[INDEX12]], 8 +; NO-VP-NEXT: [[TMP41:%.*]] = icmp eq i64 [[INDEX_NEXT15]], [[N_VEC10]] +; NO-VP-NEXT: br i1 [[TMP41]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-VP: vec.epilog.middle.block: +; NO-VP-NEXT: [[CMP_N11:%.*]] = icmp eq i64 [[N]], [[N_VEC10]] +; NO-VP-NEXT: br i1 [[CMP_N11]], label [[FOR_COND_CLEANUP]], label [[VEC_EPILOG_SCALAR_PH]] +; NO-VP: vec.epilog.scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC10]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK:%.*]] ] ; NO-VP-NEXT: br label [[FOR_BODY:%.*]] ; NO-VP: for.body: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] ; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; 
NO-VP-NEXT: [[TMP42:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; NO-VP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; NO-VP-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP9]] +; NO-VP-NEXT: [[TMP43:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP43]], [[TMP42]] ; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] ; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 ; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 ; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; NO-VP: for.cond.cleanup: ; NO-VP-NEXT: ret void ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll index 395339faaa60d..9b49d44141db3 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vplan-vp-intrinsics.ll @@ -7,7 +7,7 @@ ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize -force-vector-width=4 \ ; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -mtriple=x86_64 -mattr=+avx512f -disable-output < %s 2>&1 | FileCheck --check-prefix=NO-VP %s define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll index 5f528e8cd9ebf..fa7c0c472f70b 100644 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll @@ -6,7 +6,7 @@ ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s ; FIXME: interleaved accesses are not supported yet with predicated vectorization. diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll index 5df0229ded283..511969ed972fb 100644 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll @@ -6,7 +6,7 @@ ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s ; The target does not support predicated vectorization. 
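[Editorial note, not part of the diff: the NO-VP checks above show the conventional masked/epilogue output. For contrast, the following is a hand-written sketch of the loop shape the data-with-evl tail-folding mode aims to emit. It is illustrative only: the function, block, and value names, the element types, and the termination test are assumptions, not autogenerated update_test_checks.py output.]

declare i32 @llvm.experimental.get.vector.length.i64(i64, i32 immarg, i1 immarg)
declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr, <vscale x 4 x i1>, i32)
declare void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32)

define void @evl_sketch(ptr noalias %a, ptr noalias %b, i64 %N) {
entry:
  br label %vector.body

vector.body:
  ; The IV advances by the number of elements actually processed (EVL),
  ; not by VF * UF as in the NO-VP output above.
  %evl.iv = phi i64 [ 0, %entry ], [ %index.evl.next, %vector.body ]
  %avl = sub i64 %N, %evl.iv
  %evl = call i32 @llvm.experimental.get.vector.length.i64(i64 %avl, i32 4, i1 true)
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %evl.iv
  ; With EVL tail folding the header mask degenerates to an all-true splat.
  %v = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr %gep.b, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), i32 %evl)
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %evl.iv
  call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> %v, ptr %gep.a, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), i32 %evl)
  %evl.zext = zext i32 %evl to i64
  %index.evl.next = add i64 %evl.zext, %evl.iv
  %done = icmp uge i64 %index.evl.next, %N
  br i1 %done, label %exit, label %vector.body

exit:
  ret void
}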
diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll index a3625658fcd69..929889045dbbd 100644 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll @@ -6,7 +6,7 @@ ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s ; The target does not support predicated vectorization. From 75af7ad9f824ad8c7b3c929dbed56ffc68931aa8 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Thu, 29 Feb 2024 06:56:29 -0800 Subject: [PATCH 07/17] Removed ExplicitVectorLengthIVIncrement, replaced by Instruction::Add --- llvm/lib/Transforms/Vectorize/VPlan.h | 1 - llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 15 --------------- llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 10 +++++----- llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp | 14 ++------------ .../LoopVectorize/RISCV/vplan-vp-intrinsics.ll | 2 +- 5 files changed, 8 insertions(+), 34 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index bba955f499672..42627c573f3c2 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -1166,7 +1166,6 @@ class VPInstruction : public VPRecipeWithIRFlags { SLPStore, ActiveLaneMask, ExplicitVectorLength, - ExplicitVectorLengthIVIncrement, CalculateTripCountMinusVF, // Increment the canonical IV separately for each unrolled part. CanonicalIVIncrementForPart, diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index d5f310e8ea76f..9c367b3871fc2 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -413,17 +413,6 @@ Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) { State.EVL = this; return EVL; } - // TODO: remove this once a regular Add VPInstruction is supported. 
- case VPInstruction::ExplicitVectorLengthIVIncrement: { - assert(Part == 0 && "Expected unroll factor 1 for VP vectorization."); - Value *Phi = State.get(getOperand(0), VPIteration(0, 0)); - Value *EVL = State.get(getOperand(1), VPIteration(0, 0)); - assert(EVL->getType() == Phi->getType() && - "EVL and Phi must have the same type."); - return Builder.CreateAdd(Phi, EVL, Name, hasNoUnsignedWrap(), - hasNoSignedWrap()); - return EVL; - } case VPInstruction::CanonicalIVIncrementForPart: { auto *IV = State.get(getOperand(0), VPIteration(0, 0)); if (Part == 0) @@ -631,7 +620,6 @@ bool VPInstruction::onlyFirstLaneUsed(const VPValue *Op) const { return vputils::onlyFirstLaneUsed(this); case VPInstruction::ActiveLaneMask: case VPInstruction::ExplicitVectorLength: - case VPInstruction::ExplicitVectorLengthIVIncrement: case VPInstruction::CalculateTripCountMinusVF: case VPInstruction::CanonicalIVIncrementForPart: case VPInstruction::BranchOnCount: @@ -671,9 +659,6 @@ void VPInstruction::print(raw_ostream &O, const Twine &Indent, case VPInstruction::ExplicitVectorLength: O << "EXPLICIT-VECTOR-LENGTH"; break; - case VPInstruction::ExplicitVectorLengthIVIncrement: - O << "EXPLICIT-VECTOR-LENGTH +"; - break; case VPInstruction::FirstOrderRecurrenceSplice: O << "first-order splice"; break; diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 3d640097f6e43..cb0f8c687ee74 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1332,11 +1332,11 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { CanonicalIVPHI->getScalarType()); OpVPEVL->insertBefore(CanonicalIVIncrement); } - auto *NextEVLIV = new VPInstruction( - VPInstruction::ExplicitVectorLengthIVIncrement, {OpVPEVL, EVLPhi}, - {CanonicalIVIncrement->hasNoUnsignedWrap(), - CanonicalIVIncrement->hasNoSignedWrap()}, - CanonicalIVIncrement->getDebugLoc(), "index.evl.next"); + auto *NextEVLIV = + new VPInstruction(Instruction::Add, {OpVPEVL, EVLPhi}, + {CanonicalIVIncrement->hasNoUnsignedWrap(), + CanonicalIVIncrement->hasNoSignedWrap()}, + CanonicalIVIncrement->getDebugLoc(), "index.evl.next"); NextEVLIV->insertBefore(CanonicalIVIncrement); EVLPhi->addOperand(NextEVLIV); diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 37ea02b302102..fc604c51df799 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -95,13 +95,9 @@ static bool verifyVPBasicBlock(const VPBasicBlock *VPBB, // Set of recipe types along with VPInstruction Opcodes of all EVL-related // recipes that must appear at most once in Entry or Exiting blocks. 
DenseSet EVLFound; - bool IsHeader = false; - bool IsExiting = false; const VPlan *Plan = VPBB->getPlan(); - if (Plan && Plan->getEntry()->getNumSuccessors() == 1) { - IsHeader = Plan->getVectorLoopRegion()->getEntry() == VPBB; - IsExiting = Plan->getVectorLoopRegion()->getExiting() == VPBB; - } + bool IsHeader = Plan && Plan->getEntry()->getNumSuccessors() == 1 && + Plan->getVectorLoopRegion()->getEntry() == VPBB; auto CheckEVLRecipiesInsts = [&](const VPRecipeBase *R) { if (isa(R)) { if (!IsHeader) { @@ -124,12 +120,6 @@ static bool verifyVPBasicBlock(const VPBasicBlock *VPBB, return false; } break; - case VPInstruction::ExplicitVectorLengthIVIncrement: - if (!IsExiting) { - errs() << "EVL inc instruction not in exit block!\n"; - return false; - } - break; default: return true; } diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll index b0417c61b1c4b..72b881bd44c76 100644 --- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll @@ -36,7 +36,7 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { ; IF-EVL-NEXT: vp<[[PTR3:%[0-9]+]]> = vector-pointer ir<[[GEP3]]> ; IF-EVL-NEXT: WIDEN store vp<[[PTR3]]>, ir<[[ADD]]>, ir ; IF-EVL-NEXT: SCALAR-CAST vp<[[CAST:%[0-9]+]]> = zext vp<[[EVL]]> to i64 -; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = EXPLICIT-VECTOR-LENGTH + vp<[[CAST]]>, vp<[[EVL_PHI]]> +; IF-EVL-NEXT: EMIT vp<[[IV_NEXT]]> = add vp<[[CAST]]>, vp<[[EVL_PHI]]> ; IF-EVL-NEXT: EMIT vp<[[IV_NEXT_EXIT:%[0-9]+]]> = add vp<[[IV]]>, vp<[[VFUF]]> ; IF-EVL-NEXT: EMIT branch-on-count vp<[[IV_NEXT_EXIT]]>, vp<[[VTC]]> ; IF-EVL-NEXT: No successors From 75687985bfa82951acece4b62cf040bc0cd17487 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Fri, 1 Mar 2024 11:36:11 -0800 Subject: [PATCH 08/17] Address comments --- .../Transforms/Vectorize/LoopVectorize.cpp | 6 ++-- .../Transforms/Vectorize/VPlanVerifier.cpp | 29 ++++++++++--------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 3d6ea9e383c66..170536ba2b28c 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1580,7 +1580,7 @@ class LoopVectorizationCostModel { /// Returns true if VP intrinsics with explicit vector length support should /// be generated in the tail folded loop. - bool useVPIWithVPEVLVectorization() const { + bool foldTailWithEVL() const { return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL; } @@ -5311,7 +5311,7 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, return 1; // Do not interleave if EVL is preferred and no User IC is specified. - if (useVPIWithVPEVLVectorization()) { + if (foldTailWithEVL()) { LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated. 
" "Unroll factor forced to be 1.\n"); return 1; @@ -8547,7 +8547,7 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, VPlanTransforms::truncateToMinimalBitwidths( *Plan, CM.getMinimalBitwidths(), PSE.getSE()->getContext()); VPlanTransforms::optimize(*Plan, *PSE.getSE()); - if (CM.useVPIWithVPEVLVectorization()) + if (CM.foldTailWithEVL()) VPlanTransforms::addExplicitVectorLength(*Plan); assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid"); VPlans.push_back(std::move(Plan)); diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index fc604c51df799..365584096dae4 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -93,8 +93,9 @@ static bool verifyVPBasicBlock(const VPBasicBlock *VPBB, RecipeNumbering[&R] = Cnt++; // Set of recipe types along with VPInstruction Opcodes of all EVL-related - // recipes that must appear at most once in Entry or Exiting blocks. + // recipes that must appear at most once in the header block. DenseSet EVLFound; + const VPRecipeBase *VPWidenMemRecipe = nullptr; const VPlan *Plan = VPBB->getPlan(); bool IsHeader = Plan && Plan->getEntry()->getNumSuccessors() == 1 && Plan->getVectorLoopRegion()->getEntry() == VPBB; @@ -110,23 +111,25 @@ static bool verifyVPBasicBlock(const VPBasicBlock *VPBB, } return true; } - auto *RInst = dyn_cast(R); - if (!RInst) - return true; - switch (RInst->getOpcode()) { - case VPInstruction::ExplicitVectorLength: + if (const auto *RInst = dyn_cast(R); + RInst && RInst->getOpcode() == VPInstruction::ExplicitVectorLength) { if (!IsHeader) { - errs() << "EVL instruction not in entry block!\n"; + errs() << "EVL instruction not in the header block!\n"; + return false; + } + if (!EVLFound.insert(RInst->getOpcode() + VPDef::VPLastPHISC).second) { + errs() << "EVL instruction inserted more than once!\n"; + return false; + } + if (VPWidenMemRecipe) { + errs() << "Use of EVL instruction by widen memory recipe before " + "definition!\n"; return false; } - break; - default: return true; } - if (!EVLFound.insert(RInst->getOpcode() + VPDef::VPLastPHISC).second) { - errs() << "EVL instruction inserted more than once!\n"; - return false; - } + if (isa(R)) + VPWidenMemRecipe = R; return true; }; From e320aa2fe8ef821214d53b8f6797c5572a36c930 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Mon, 4 Mar 2024 10:07:45 -0800 Subject: [PATCH 09/17] Address comments, rebase --- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 6 ++++++ llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 170536ba2b28c..bf1e8ad95c87b 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -9456,6 +9456,9 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { if (State.EVL) { assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with " "explicit vector length."); + assert(cast(State.EVL)->getOpcode() == + VPInstruction::ExplicitVectorLength && + "EVL must be VPInstruction::ExplicitVectorLength."); Value *EVL = State.get(State.EVL, VPIteration(0, 0)); // If EVL is not nullptr, then EVL must be a valid value set during plan // creation, possibly default value = whole vector register length. 
EVL @@ -9500,6 +9503,9 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { if (State.EVL) { assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with " "explicit vector length."); + assert(cast(State.EVL)->getOpcode() == + VPInstruction::ExplicitVectorLength && + "EVL must be VPInstruction::ExplicitVectorLength."); Value *EVL = State.get(State.EVL, VPIteration(0, 0)); // If EVL is not nullptr, then EVL must be a valid value set during plan // creation, possibly default value = whole vector register length. EVL diff --git a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp index 365584096dae4..12d37fa711db9 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanVerifier.cpp @@ -97,7 +97,7 @@ static bool verifyVPBasicBlock(const VPBasicBlock *VPBB, DenseSet EVLFound; const VPRecipeBase *VPWidenMemRecipe = nullptr; const VPlan *Plan = VPBB->getPlan(); - bool IsHeader = Plan && Plan->getEntry()->getNumSuccessors() == 1 && + bool IsHeader = Plan->getEntry()->getNumSuccessors() == 1 && Plan->getVectorLoopRegion()->getEntry() == VPBB; auto CheckEVLRecipiesInsts = [&](const VPRecipeBase *R) { if (isa(R)) { From 96b3db5fa98830e1abcb4264414c37c45f82771a Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Mon, 11 Mar 2024 05:02:45 -0700 Subject: [PATCH 10/17] Rebase, address comments --- .../Transforms/Vectorize/LoopVectorize.cpp | 45 ++++++++++++------- llvm/lib/Transforms/Vectorize/VPlan.h | 2 + .../Transforms/Vectorize/VPlanTransforms.cpp | 18 ++++---- 3 files changed, 39 insertions(+), 26 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index bf1e8ad95c87b..1d5da777ac6fa 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -253,7 +253,8 @@ static cl::opt ForceTailFoldingStyle( "data-and-control-without-rt-check", "Similar to data-and-control, but remove the runtime check"), clEnumValN(TailFoldingStyle::DataWithEVL, "data-with-evl", - "Use predicated EVL instructions for tail folding"))); + "Use predicated EVL instructions for tail folding. If EVL " + "is unsupported, fallback to data-without-lane-mask."))); static cl::opt MaximizeBandwidth( "vectorizer-maximize-bandwidth", cl::init(false), cl::Hidden, @@ -1507,8 +1508,13 @@ class LoopVectorizationCostModel { /// Returns the TailFoldingStyle that is best for the current loop. TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const { - return IVUpdateMayOverflow ? ChosenTailFoldingStyle.first - : ChosenTailFoldingStyle.second; + if (!ChosenTailFoldingStyle.first) { + assert(!ChosenTailFoldingStyle.second && + "Chosen tail folding style must not be set."); + return TailFoldingStyle::None; + } + return *(IVUpdateMayOverflow ? ChosenTailFoldingStyle.first + : ChosenTailFoldingStyle.second); } /// Selects and saves TailFoldingStyle for 2 options - if IV update may @@ -1516,16 +1522,13 @@ class LoopVectorizationCostModel { /// \param IsScalableVF true if scalable vector factors enabled. /// \param UserIC User specific interleave count. 
void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) { - assert(ChosenTailFoldingStyle.first == TailFoldingStyle::None && - ChosenTailFoldingStyle.second == TailFoldingStyle::None && + assert(!ChosenTailFoldingStyle.first && !ChosenTailFoldingStyle.second && "Tail folding must not be selected yet."); if (!Legal->prepareToFoldTailByMasking()) return; if (ForceTailFoldingStyle.getNumOccurrences()) { - ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = - ForceTailFoldingStyle; - if (ChosenTailFoldingStyle.first == TailFoldingStyle::DataWithEVL) { + if (ForceTailFoldingStyle == TailFoldingStyle::DataWithEVL) { // FIXME: use actual opcode/data type for analysis here. // FIXME: Investigate opportunity for fixed vector factor. bool EVLIsLegal = @@ -1544,17 +1547,23 @@ class LoopVectorizationCostModel { return Data.second.first == CM_Widen_Reverse; }); if (!EVLIsLegal) { + // If for some reason EVL mode is unsupported, fallback to + // DataWithoutLaneMask to try to vectorize the loop with folded tail + // in a generic way. ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = TailFoldingStyle::DataWithoutLaneMask; - LLVM_DEBUG(dbgs() - << "LV: Preference for VP intrinsics indicated. Will " - "not try to generate VP Intrinsics since " - << (UserIC > 1 - ? "interleave count specified is greater than 1.\n" - : "the target does not support vector length " - "predication.\n")); + LLVM_DEBUG( + dbgs() + << "LV: Preference for VP intrinsics indicated. Will " + "not try to generate VP Intrinsics " + << (UserIC > 1 + ? "since interleave count specified is greater than 1.\n" + : "due to non-interleaving reasons.\n")); + return; } } + ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = + ForceTailFoldingStyle; return; } @@ -1728,8 +1737,8 @@ class LoopVectorizationCostModel { /// Control finally chosen tail folding style. The first element is used if /// the IV update may overflow, the second element - if it does not. - std::pair ChosenTailFoldingStyle = - std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None); + std::pair, std::optional> + ChosenTailFoldingStyle; /// A map holding scalar costs for different vectorization factors. The /// presence of a cost for an instruction in the mapping indicates that the @@ -9453,6 +9462,7 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { for (unsigned Part = 0; Part < State.UF; ++Part) { Instruction *NewSI = nullptr; Value *StoredVal = State.get(StoredValue, Part); + // TODO: split this into several classes for better design. if (State.EVL) { assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with " "explicit vector length."); @@ -9500,6 +9510,7 @@ void VPWidenMemoryInstructionRecipe::execute(VPTransformState &State) { State.setDebugLocFrom(getDebugLoc()); for (unsigned Part = 0; Part < State.UF; ++Part) { Value *NewLI; + // TODO: split this into several classes for better design. if (State.EVL) { assert(State.UF == 1 && "Expected only UF == 1 when vectorizing with " "explicit vector length."); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 42627c573f3c2..5167c4f2ab75a 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -246,6 +246,8 @@ struct VPTransformState { /// transformation, possibly a default value = whole vector register length. /// EVL is created only if TTI prefers predicated vectorization, thus if EVL /// is not nullptr it also implies preference for predicated vectorization. 
+ /// TODO: this is a temporary solution, the EVL must be explicitly used by + /// the recipes and must be removed here. VPValue *EVL = nullptr; /// Hold the indices to generate specific scalar instructions. Null indicates diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index cb0f8c687ee74..75b2cf86278bd 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1306,9 +1306,9 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { auto *CanonicalIVPHI = Plan.getCanonicalIV(); VPValue *StartV = CanonicalIVPHI->getStartValue(); - // Walk users of WideCanonicalIV and replace all compares of the form - // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an - // all-true-mask. + // Walk VPWidenMemoryInstructionRecipe users of WideCanonicalIV and replace + // all compares of the form (ICMP_ULE, WideCanonicalIV, backedge-taken-count), + // used as mask in VPWidenMemoryInstructionRecipe, with an all-true-mask. Value *TrueMask = ConstantInt::getTrue(CanonicalIVPHI->getScalarType()->getContext()); VPValue *VPTrueMask = Plan.getVPValueOrAddLiveIn(TrueMask); @@ -1325,11 +1325,11 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { auto *CanonicalIVIncrement = cast<VPInstruction>(CanonicalIVPHI->getBackedgeValue()); VPSingleDefRecipe *OpVPEVL = VPEVL; - if (CanonicalIVPHI->getScalarType() != - IntegerType::get(CanonicalIVPHI->getScalarType()->getContext(), - /*NumBits=*/32)) { - OpVPEVL = new VPScalarCastRecipe(Instruction::ZExt, OpVPEVL, - CanonicalIVPHI->getScalarType()); + if (unsigned IVSize = CanonicalIVPHI->getScalarType()->getScalarSizeInBits(); + IVSize != 32) { + OpVPEVL = new VPScalarCastRecipe(IVSize < 32 ? Instruction::Trunc + : Instruction::ZExt, + OpVPEVL, CanonicalIVPHI->getScalarType()); OpVPEVL->insertBefore(CanonicalIVIncrement); } auto *NextEVLIV = @@ -1341,7 +1341,7 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { EVLPhi->addOperand(NextEVLIV); // Replace all uses of VPCanonicalIVPHIRecipe by - // VPEVLBasedIVPHIRecipe except for the canonical IV increment.. + // VPEVLBasedIVPHIRecipe except for the canonical IV increment. CanonicalIVPHI->replaceAllUsesWith(EVLPhi); CanonicalIVIncrement->setOperand(0, CanonicalIVPHI); // TODO: support unroll factor > 1. From d000d5a55dafec8bdbed08f5d49a94b6d294ab1d Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Mon, 18 Mar 2024 13:47:21 -0700 Subject: [PATCH 11/17] Address pending comments --- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 9 ++++++--- llvm/lib/Transforms/Vectorize/VPlan.h | 9 +++++---- llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 2 +- llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 9 +++++---- 4 files changed, 17 insertions(+), 12 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 1d5da777ac6fa..1667c91e5e4eb 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -4699,9 +4699,11 @@ LoopVectorizationCostModel::computeMaxVF(ElementCount UserVF, unsigned UserIC) { setTailFoldingStyles(MaxFactors.ScalableVF.isScalable(), UserIC); if (foldTailByMasking()) { if (getTailFoldingStyle() == TailFoldingStyle::DataWithEVL) { - LLVM_DEBUG(dbgs() << "LV: Preference for VP intrinsics indicated.
Will " - "try to generate VP Intrinsics with scalable vector " - "factors only.\n"); + LLVM_DEBUG( + dbgs() + << "LV: tail is folded with EVL, forcing unroll factor to be 1. Will " + "try to generate VP Intrinsics with scalable vector " + "factors only.\n"); // Tail folded loop using VP intrinsics restricts the VF to be scalable // for now. // TODO: extend it for fixed vectors, if required. @@ -8556,6 +8558,7 @@ void LoopVectorizationPlanner::buildVPlansWithVPRecipes(ElementCount MinVF, VPlanTransforms::truncateToMinimalBitwidths( *Plan, CM.getMinimalBitwidths(), PSE.getSE()->getContext()); VPlanTransforms::optimize(*Plan, *PSE.getSE()); + // TODO: try to put it close to addActiveLaneMask(). if (CM.foldTailWithEVL()) VPlanTransforms::addExplicitVectorLength(*Plan); assert(verifyVPlanIsValid(*Plan) && "VPlan is invalid"); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 5167c4f2ab75a..0b261c483f2e5 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -2499,12 +2499,13 @@ class VPActiveLaneMaskPHIRecipe : public VPHeaderPHIRecipe { }; /// A recipe for generating the phi node for the current index of elements, -/// adjusted in accordance with EVL value. It starts at StartIV value and gets -/// incremented by EVL in each iteration of the vector loop. +/// adjusted in accordance with EVL value. It starts at the start value of the +/// canonical induction and gets incremented by EVL in each iteration of the +/// vector loop. class VPEVLBasedIVPHIRecipe : public VPHeaderPHIRecipe { public: - VPEVLBasedIVPHIRecipe(VPValue *StartMask, DebugLoc DL) - : VPHeaderPHIRecipe(VPDef::VPEVLBasedIVPHISC, nullptr, StartMask, DL) {} + VPEVLBasedIVPHIRecipe(VPValue *StartIV, DebugLoc DL) + : VPHeaderPHIRecipe(VPDef::VPEVLBasedIVPHISC, nullptr, StartIV, DL) {} ~VPEVLBasedIVPHIRecipe() override = default; diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index 9c367b3871fc2..dc46c375eb324 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -404,7 +404,7 @@ Value *VPInstruction::generatePerPart(VPTransformState &State, unsigned Part) { // TODO: Restructure this code with an explicit remainder loop, vsetvli can // be outside of the main loop. assert(Part == 0 && "No unrolling expected for predicated vectorization."); - // Compute VTC - IV as the EVL(requested vector length). + // Compute VTC - IV as the AVL (requested vector length). Value *Index = State.get(getOperand(0), VPIteration(0, 0)); Value *TripCount = State.get(getOperand(1), VPIteration(0, 0)); Value *AVL = State.Builder.CreateSub(TripCount, Index); diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 75b2cf86278bd..06286b5fd0a6f 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1210,7 +1210,7 @@ static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch( } /// Replaces (ICMP_ULE, WideCanonicalIV, backedge-taken-count) pattern using -/// the given idiom \p Idiom. +/// the given \p Idiom. static void replaceHeaderPredicateWithIdiom( VPlan &Plan, VPValue &Idiom, function_ref Cond = {}) { @@ -1295,10 +1295,11 @@ void VPlanTransforms::addActiveLaneMask( /// /// vector.body: /// ... 
-/// %P = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ], [ -/// %NextEVL, %vector.body ] %EVL = EXPLICIT-VECTOR-LENGTH %P, original TC +/// %EVLPhi = EXPLICIT-VECTOR-LENGTH-BASED-IV-PHI [ %StartV, %vector.ph ], +/// [ %NextEVLIV, %vector.body ] +/// %VPEVL = EXPLICIT-VECTOR-LENGTH %EVLPhi, original TC /// ... -/// %NextEVL = EXPLICIT-VECTOR-LENGTH + %P, %EVL +/// %NextEVLIV = add i32 (cast to i32 %VPEVL), %EVLPhi /// ... /// void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { From d476fe1ebed3615c29a4834af9b8b5247bdcc270 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Tue, 26 Mar 2024 13:18:49 -0700 Subject: [PATCH 12/17] Rebase, address comments. --- .../Transforms/Vectorize/LoopVectorize.cpp | 84 ++++++++++--------- .../lib/Transforms/Vectorize/VPlanRecipes.cpp | 1 + .../Transforms/Vectorize/VPlanTransforms.cpp | 12 +-- 3 files changed, 50 insertions(+), 47 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 1667c91e5e4eb..f42cdbfdaf7fb 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1524,53 +1524,55 @@ class LoopVectorizationCostModel { void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) { assert(!ChosenTailFoldingStyle.first && !ChosenTailFoldingStyle.second && "Tail folding must not be selected yet."); - if (!Legal->prepareToFoldTailByMasking()) - return; - - if (ForceTailFoldingStyle.getNumOccurrences()) { - if (ForceTailFoldingStyle == TailFoldingStyle::DataWithEVL) { - // FIXME: use actual opcode/data type for analysis here. - // FIXME: Investigate opportunity for fixed vector factor. - bool EVLIsLegal = - IsScalableVF && UserIC <= 1 && - TTI.hasActiveVectorLength(0, nullptr, Align()) && - !EnableVPlanNativePath && - // FIXME: implement support for max safe dependency distance. - Legal->isSafeForAnyVectorWidth() && - // FIXME: remove this once reductions are supported. - Legal->getReductionVars().empty() && - // FIXME: remove this once vp_reverse is supported. - none_of(WideningDecisions, - [](const std::pair<std::pair<Instruction *, ElementCount>, - std::pair<InstWidening, InstructionCost>> - &Data) { - return Data.second.first == CM_Widen_Reverse; - }); - if (!EVLIsLegal) { - // If for some reason EVL mode is unsupported, fall back to - // DataWithoutLaneMask to try to vectorize the loop with folded tail - // in a generic way. - ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = - TailFoldingStyle::DataWithoutLaneMask; - LLVM_DEBUG( - dbgs() - << "LV: Preference for VP intrinsics indicated. Will " - "not try to generate VP Intrinsics " - << (UserIC > 1 - ? "since interleave count specified is greater than 1.\n" - : "due to non-interleaving reasons.\n")); - return; - } - } + if (!Legal->prepareToFoldTailByMasking()) { ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = - ForceTailFoldingStyle; + TailFoldingStyle::None; return; } - ChosenTailFoldingStyle.first = + if (!ForceTailFoldingStyle.getNumOccurrences()) { + ChosenTailFoldingStyle.first = TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true); - ChosenTailFoldingStyle.second = + ChosenTailFoldingStyle.second = TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false); + return; + } + + // Set styles when forced. + ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = + ForceTailFoldingStyle; + if (ForceTailFoldingStyle != TailFoldingStyle::DataWithEVL) + return; + // Override forced styles if needed. + // FIXME: use actual opcode/data type for analysis here.
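Taken together, the restructured setTailFoldingStyles above now reads as a flat sequence of early returns: None when the tail cannot be folded by masking, the TTI-preferred styles when nothing is forced on the command line, and otherwise the forced style, which the legality checks continuing below may still downgrade to DataWithoutLaneMask. A compact standalone sketch of that control flow; StyleSelector, preferred, and the boolean parameters are illustrative stand-ins, not names from the patch:

    #include <cassert>
    #include <optional>
    #include <utility>

    enum class TailFoldingStyle { None, DataWithoutLaneMask, DataWithEVL };

    // Hypothetical stand-in for the cost model's state and queries.
    struct StyleSelector {
      std::pair<std::optional<TailFoldingStyle>, std::optional<TailFoldingStyle>>
          Chosen; // first: IV update may overflow; second: it cannot.

      void select(bool CanFoldTail, bool Forced, TailFoldingStyle ForcedStyle,
                  bool EVLIsLegal) {
        assert(!Chosen.first && !Chosen.second && "must not be selected yet");
        if (!CanFoldTail) {
          Chosen.first = Chosen.second = TailFoldingStyle::None;
          return;
        }
        if (!Forced) {
          Chosen.first = preferred(/*IVUpdateMayOverflow=*/true);
          Chosen.second = preferred(/*IVUpdateMayOverflow=*/false);
          return;
        }
        // Record the forced style first; override it only if EVL was
        // requested but is not usable for this loop.
        Chosen.first = Chosen.second = ForcedStyle;
        if (ForcedStyle == TailFoldingStyle::DataWithEVL && !EVLIsLegal)
          Chosen.first = Chosen.second = TailFoldingStyle::DataWithoutLaneMask;
      }

      TailFoldingStyle get(bool IVUpdateMayOverflow) const {
        if (!Chosen.first)
          return TailFoldingStyle::None; // Queried before selection ran.
        return *(IVUpdateMayOverflow ? Chosen.first : Chosen.second);
      }

      static TailFoldingStyle preferred(bool /*IVUpdateMayOverflow*/) {
        return TailFoldingStyle::DataWithoutLaneMask; // placeholder TTI answer
      }
    };

The std::optional pair makes "not selected yet" a distinct state instead of overloading TailFoldingStyle::None for it, which is what the getTailFoldingStyle assertion earlier in the patch relies on.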
+ // FIXME: Investigate opportunity for fixed vector factor. + bool EVLIsLegal = + IsScalableVF && UserIC <= 1 && + TTI.hasActiveVectorLength(0, nullptr, Align()) && + !EnableVPlanNativePath && + // FIXME: implement support for max safe dependency distance. + Legal->isSafeForAnyVectorWidth() && + // FIXME: remove this once reductions are supported. + Legal->getReductionVars().empty() && + // FIXME: remove this once vp_reverse is supported. + none_of(WideningDecisions, + [](const std::pair<std::pair<Instruction *, ElementCount>, + std::pair<InstWidening, InstructionCost>> & + Data) { return Data.second.first == CM_Widen_Reverse; }); + if (!EVLIsLegal) { + // If for some reason EVL mode is unsupported, fall back to + // DataWithoutLaneMask to try to vectorize the loop with folded tail + // in a generic way. + ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second = + TailFoldingStyle::DataWithoutLaneMask; + LLVM_DEBUG( + dbgs() + << "LV: Preference for VP intrinsics indicated. Will " + "not try to generate VP Intrinsics " + << (UserIC > 1 + ? "since interleave count specified is greater than 1.\n" + : "due to non-interleaving reasons.\n")); + } } /// Returns true if all loop blocks should be masked to fold tail loop. diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index dc46c375eb324..f03caffcf656c 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -286,6 +286,7 @@ bool VPInstruction::canGenerateScalarForFirstLane() const { case VPInstruction::CanonicalIVIncrementForPart: case VPInstruction::ComputeReductionResult: case VPInstruction::PtrAdd: + case VPInstruction::ExplicitVectorLength: return true; default: return false; diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 06286b5fd0a6f..dfe8a667ea4d8 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1211,9 +1211,9 @@ static VPActiveLaneMaskPHIRecipe *addVPLaneMaskPhiAndUpdateExitBranch( /// Replaces (ICMP_ULE, WideCanonicalIV, backedge-taken-count) pattern using /// the given \p Idiom. -static void replaceHeaderPredicateWithIdiom( - VPlan &Plan, VPValue &Idiom, - function_ref<bool(VPUser &, unsigned)> Cond = {}) { +static void +replaceHeaderPredicateWith(VPlan &Plan, VPValue &Idiom, + function_ref<bool(VPUser &, unsigned)> Cond = {}) { auto *FoundWidenCanonicalIVUser = find_if(Plan.getCanonicalIV()->users(), [](VPUser *U) { return isa<VPWidenCanonicalIVRecipe>(U); }); @@ -1277,7 +1277,7 @@ void VPlanTransforms::addActiveLaneMask( // Walk users of WideCanonicalIV and replace all compares of the form // (ICMP_ULE, WideCanonicalIV, backedge-taken-count) with an // active-lane-mask. - replaceHeaderPredicateWithIdiom(Plan, *LaneMask); + replaceHeaderPredicateWith(Plan, *LaneMask); } /// Add a VPEVLBasedIVPHIRecipe and related recipes to \p Plan and @@ -1295,10 +1295,11 @@ void VPlanTransforms::addActiveLaneMask( /// [ %NextEVLIV, %vector.body ] /// %VPEVL = EXPLICIT-VECTOR-LENGTH %EVLPhi, original TC /// ... -/// %NextEVLIV = add i32 (cast to i32 %VPEVL), %EVLPhi +/// %NextEVLIV = add IVSize (cast i32 %VPEVL to IVSize), %EVLPhi /// ...
/// void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { @@ -1313,7 +1313,7 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { Value *TrueMask = ConstantInt::getTrue(CanonicalIVPHI->getScalarType()->getContext()); VPValue *VPTrueMask = Plan.getVPValueOrAddLiveIn(TrueMask); - replaceHeaderPredicateWithIdiom(Plan, *VPTrueMask, [](VPUser &U, unsigned) { + replaceHeaderPredicateWith(Plan, *VPTrueMask, [](VPUser &U, unsigned) { return isa<VPWidenMemoryInstructionRecipe>(U); }); // Now create the ExplicitVectorLengthPhi recipe in the main loop. From 7c7cc1c4983174653568405d831c4a848f9aa029 Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Tue, 26 Mar 2024 13:25:57 -0700 Subject: [PATCH 13/17] Fix formatting --- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index f42cdbfdaf7fb..bc5b8fcb3074e 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1532,9 +1532,9 @@ class LoopVectorizationCostModel { if (!ForceTailFoldingStyle.getNumOccurrences()) { ChosenTailFoldingStyle.first = - TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true); + TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true); ChosenTailFoldingStyle.second = - TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false); + TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false); return; } From 1532c859dc2c7a728fd72f04b5428ec53528a4bd Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Wed, 27 Mar 2024 13:01:36 -0700 Subject: [PATCH 14/17] Address comments --- llvm/lib/Transforms/Vectorize/VPlan.h | 11 ++++++----- llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 1 + 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 0b261c483f2e5..17adafe97f163 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -242,10 +242,11 @@ struct VPTransformState { ElementCount VF; unsigned UF; - /// If EVL is not nullptr, then EVL must be a valid value set during plan - /// transformation, possibly a default value = whole vector register length. - /// EVL is created only if TTI prefers predicated vectorization, thus if EVL - /// is not nullptr it also implies preference for predicated vectorization. + /// If EVL (Explicit Vector Length) is not nullptr, then EVL must be a valid + /// value set during plan transformation, possibly a default value = whole + /// vector register length. EVL is created only if TTI prefers predicated + /// vectorization, thus if EVL is not nullptr it also implies preference for + /// predicated vectorization. /// TODO: this is a temporary solution, the EVL must be explicitly used by /// the recipes and must be removed here.
VPValue *EVL = nullptr; @@ -2510,7 +2511,7 @@ class VPEVLBasedIVPHIRecipe : public VPHeaderPHIRecipe { ~VPEVLBasedIVPHIRecipe() override = default; VPEVLBasedIVPHIRecipe *clone() override { - return new VPEVLBasedIVPHIRecipe(getOperand(0), getDebugLoc()); + llvm_unreachable("cloning not implemented yet"); } VP_CLASSOF_IMPL(VPDef::VPEVLBasedIVPHISC) diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index dfe8a667ea4d8..0db3b90cbd186 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1307,6 +1307,7 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { auto *CanonicalIVPHI = Plan.getCanonicalIV(); VPValue *StartV = CanonicalIVPHI->getStartValue(); +// TODO: revisit this and try to remove the mask operand. // Walk VPWidenMemoryInstructionRecipe users of WideCanonicalIV and replace // all compares of the form (ICMP_ULE, WideCanonicalIV, backedge-taken-count), // used as mask in VPWidenMemoryInstructionRecipe, with an all-true-mask. Value *TrueMask = From 08809ca3f1ca5e1cf7d9cc4e8ec50db3d6a77e8d Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Fri, 29 Mar 2024 13:44:38 -0700 Subject: [PATCH 15/17] Address comments. 1. Fixed UB in areRuntimeChecksProfitable. 2. Moved most of the tests to the RISCV directory. 3. Fixed the check for supported EVL mode in foldTailWithEVL. It has to happen later, since the widening decisions are made after setTailFoldingStyles(). 4. Fixed a crash with VPDerivedIVRecipe. 5. Rebased. --- .../Transforms/Vectorize/LoopVectorize.cpp | 31 ++-- llvm/lib/Transforms/Vectorize/VPlan.h | 7 +- .../Transforms/Vectorize/VPlanTransforms.cpp | 4 +- ...cs.ll => vectorize-force-tail-with-evl.ll} | 0 ...insics.ll => vplan-force-tail-with-evl.ll} | 0 ...rize-force-tail-with-evl-gather-scatter.ll | 116 ++++++++++++ ...ectorize-force-tail-with-evl-interleave.ll | 175 ++++++++++++++++++ .../vectorize-force-tail-with-evl-iv32.ll | 124 +++++++++++++ ...ze-force-tail-with-evl-masked-loadstore.ll | 132 +++++++++++++ ...ctorize-force-tail-with-evl-no-masking.ll} | 4 +- ...-force-tail-with-evl-reverse-load-store.ll | 119 ++++++++++++ ...cs.ll => vectorize-force-tail-with-evl.ll} | 0 ...cs.ll => vectorize-force-tail-with-evl.ll} | 0 .../vectorize-vp-intrinsics-gather-scatter.ll | 48 ----- .../vectorize-vp-intrinsics-interleave.ll | 168 ----------------- .../vectorize-vp-intrinsics-iv32.ll | 85 --------- ...ectorize-vp-intrinsics-masked-loadstore.ll | 60 ------ ...torize-vp-intrinsics-reverse-load-store.ll | 49 ----- ...insics.ll => vplan-force-tail-with-evl.ll} | 0 19 files changed, 690 insertions(+), 432 deletions(-) rename llvm/test/Transforms/LoopVectorize/PowerPC/{vectorize-vp-intrinsics.ll => vectorize-force-tail-with-evl.ll} (100%) rename llvm/test/Transforms/LoopVectorize/PowerPC/{vplan-vp-intrinsics.ll => vplan-force-tail-with-evl.ll} (100%) create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll create mode 100644 llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll rename llvm/test/Transforms/LoopVectorize/{vectorize-vp-intrinsics-no-masking.ll => RISCV/vectorize-force-tail-with-evl-no-masking.ll} (84%) create mode 100644
llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll rename llvm/test/Transforms/LoopVectorize/X86/{vectorize-vp-intrinsics.ll => vectorize-force-tail-with-evl.ll} (100%) rename llvm/test/Transforms/LoopVectorize/{vectorize-vp-intrinsics.ll => vectorize-force-tail-with-evl.ll} (100%) delete mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll delete mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll delete mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll delete mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll delete mode 100644 llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll rename llvm/test/Transforms/LoopVectorize/{vplan-vp-intrinsics.ll => vplan-force-tail-with-evl.ll} (100%) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index bc5b8fcb3074e..2cd1dc9e3f7da 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1553,12 +1553,7 @@ class LoopVectorizationCostModel { // FIXME: implement support for max safe dependency distance. Legal->isSafeForAnyVectorWidth() && // FIXME: remove this once reductions are supported. - Legal->getReductionVars().empty() && - // FIXME: remove this once vp_reverse is supported. - none_of(WideningDecisions, - [](const std::pair<std::pair<Instruction *, ElementCount>, - std::pair<InstWidening, InstructionCost>> & - Data) { return Data.second.first == CM_Widen_Reverse; }); + Legal->getReductionVars().empty(); if (!EVLIsLegal) { // If for some reason EVL mode is unsupported, fall back to // DataWithoutLaneMask to try to vectorize the loop with folded tail @@ -1592,7 +1587,13 @@ class LoopVectorizationCostModel { /// Returns true if VP intrinsics with explicit vector length support should /// be generated in the tail folded loop. bool foldTailWithEVL() const { - return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL; + return getTailFoldingStyle() == TailFoldingStyle::DataWithEVL && + // FIXME: remove this once vp_reverse is supported. + none_of( + WideningDecisions, + [](const std::pair<std::pair<Instruction *, ElementCount>, + std::pair<InstWidening, InstructionCost>> + &Data) { return Data.second.first == CM_Widen_Reverse; }); } /// Returns true if the Phi is part of an inloop reduction. @@ -9255,7 +9256,7 @@ void VPDerivedIVRecipe::execute(VPTransformState &State) { State.Builder.setFastMathFlags(FPBinOp->getFastMathFlags()); Value *Step = State.get(getStepValue(), VPIteration(0, 0)); - Value *CanonicalIV = State.get(getCanonicalIV(), VPIteration(0, 0)); + Value *CanonicalIV = State.get(getOperand(1), VPIteration(0, 0)); Value *DerivedIV = emitTransformedIndex( State.Builder, CanonicalIV, getStartValue()->getLiveInIRValue(), Step, Kind, cast_if_present<BinaryOperator>(FPBinOp)); @@ -9742,7 +9743,7 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, } // The scalar cost should only be 0 when vectorizing with a user specified VF/IC. In those cases, runtime checks should always be generated.
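The hunk that follows is the UB fix from point 1 of this commit message: the minimum-trip-count computation moves from double to uint64_t arithmetic. In the old code, MinTC1 could be negative or infinite when the vector body was not cheaper per lane than the scalar loop, and converting such a double to uint64_t in the std::ceil/std::max line is undefined behavior in C++, which is presumably the UB being fixed. For reference, the bound encoded in the comments comes from requiring the vector loop (runtime-check cost RtC, cost VecC per vector iteration covering VF scalar iterations, epilogue cost EpiC) to beat the scalar loop of per-iteration cost ScalarC:

    \[
      RtC + VecC \cdot \frac{TC}{VF} + EpiC < ScalarC \cdot TC
      \quad\Longrightarrow\quad
      TC > \frac{VF \cdot (RtC + EpiC)}{ScalarC \cdot VF - VecC}
    \]

With EpiC assumed 0, the integer form is MinTC1 = divideCeil(RtC * VF, ScalarC * VF - VecC), guarded against a zero divisor, and the bound keeping the checks under a tenth of the scalar cost is MinTC2 = divideCeil(RtC * 10, ScalarC). With made-up example costs (not from the patch) ScalarC = 4, VecC = 10, VF = 8, RtC = 33: Div = 4 * 8 - 10 = 22, MinTC1 = divideCeil(264, 22) = 12, MinTC2 = divideCeil(330, 4) = 83, so MinTC = 83, aligned up to 88 when a scalar epilogue is allowed.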
- double ScalarC = *VF.ScalarCost.getValue(); + uint64_t ScalarC = *VF.ScalarCost.getValue(); if (ScalarC == 0) return true; @@ -9769,7 +9770,7 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, // RtC + VecC * (TC / VF) + EpiC < ScalarC * TC // // Now we can compute the minimum required trip count TC as - // (RtC + EpiC) / (ScalarC - (VecC / VF)) < TC + // VF * (RtC + EpiC) / (ScalarC * VF - VecC) < TC // // For now we assume the epilogue cost EpiC = 0 for simplicity. Note that // the computations are performed on doubles, not integers and the result @@ -9781,9 +9782,9 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, AssumedMinimumVscale = *VScale; IntVF *= AssumedMinimumVscale; } - double VecCOverVF = double(*VF.Cost.getValue()) / IntVF; - double RtC = *CheckCost.getValue(); - double MinTC1 = RtC / (ScalarC - VecCOverVF); + uint64_t RtC = *CheckCost.getValue(); + uint64_t Div = ScalarC * IntVF - *VF.Cost.getValue(); + uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div); // Second, compute a minimum iteration count so that the cost of the // runtime checks is only a fraction of the total scalar loop cost. This @@ -9792,12 +9793,12 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, // * TC. To bound the runtime check to be a fraction 1/X of the scalar // cost, compute // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC - double MinTC2 = RtC * 10 / ScalarC; + uint64_t MinTC2 = divideCeil(RtC * 10, ScalarC); // Now pick the larger minimum. If it is not a multiple of VF and a scalar // epilogue is allowed, choose the next closest multiple of VF. This should // partly compensate for ignoring the epilogue cost. - uint64_t MinTC = std::ceil(std::max(MinTC1, MinTC2)); + uint64_t MinTC = std::max(MinTC1, MinTC2); if (SEL == CM_ScalarEpilogueAllowed) MinTC = alignTo(MinTC, IntVF); VF.MinProfitableTripCount = ElementCount::getFixed(MinTC); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 17adafe97f163..1ef759dea3c0b 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -2581,6 +2581,10 @@ class VPDerivedIVRecipe : public VPSingleDefRecipe { /// for floating point inductions. const FPMathOperator *FPBinOp; + VPCanonicalIVPHIRecipe *getCanonicalIV() const { + return cast<VPCanonicalIVPHIRecipe>(getOperand(1)); + } + public: VPDerivedIVRecipe(const InductionDescriptor &IndDesc, VPValue *Start, VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step) @@ -2619,9 +2623,6 @@ class VPDerivedIVRecipe : public VPSingleDefRecipe { } VPValue *getStartValue() const { return getOperand(0); } - VPCanonicalIVPHIRecipe *getCanonicalIV() const { - return cast<VPCanonicalIVPHIRecipe>(getOperand(1)); - } VPValue *getStepValue() const { return getOperand(2); } /// Returns true if the recipe only uses the first lane of operand \p Op. diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 0db3b90cbd186..1256e4d8fda50 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1307,13 +1307,13 @@ void VPlanTransforms::addExplicitVectorLength(VPlan &Plan) { auto *CanonicalIVPHI = Plan.getCanonicalIV(); VPValue *StartV = CanonicalIVPHI->getStartValue(); -// TODO: revisit this and try to remove the mask operand.
// Walk VPWidenMemoryInstructionRecipe users of WideCanonicalIV and replace // all compares of the form (ICMP_ULE, WideCanonicalIV, backedge-taken-count), // used as mask in VPWidenMemoryInstructionRecipe, with an all-true-mask. Value *TrueMask = ConstantInt::getTrue(CanonicalIVPHI->getScalarType()->getContext()); - VPValue *VPTrueMask = Plan.getVPValueOrAddLiveIn(TrueMask); + VPValue *VPTrueMask = Plan.getOrAddLiveIn(TrueMask); replaceHeaderPredicateWith(Plan, *VPTrueMask, [](VPUser &U, unsigned) { return isa(U); }); diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-force-tail-with-evl.ll similarity index 100% rename from llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-vp-intrinsics.ll rename to llvm/test/Transforms/LoopVectorize/PowerPC/vectorize-force-tail-with-evl.ll diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/vplan-force-tail-with-evl.ll similarity index 100% rename from llvm/test/Transforms/LoopVectorize/PowerPC/vplan-vp-intrinsics.ll rename to llvm/test/Transforms/LoopVectorize/PowerPC/vplan-force-tail-with-evl.ll diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll new file mode 100644 index 0000000000000..835ff37568817 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll @@ -0,0 +1,116 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP + +define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) { +; IF-EVL-LABEL: @gather_scatter( +; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N:%.*]] +; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 2 +; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 +; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2 +; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1 +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]] +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 2 +; IF-EVL-NEXT: [[TMP11:%.*]] = call @llvm.experimental.stepvector.nxv2i64() +; IF-EVL-NEXT: [[TMP12:%.*]] = add [[TMP11]], zeroinitializer +; IF-EVL-NEXT: [[TMP13:%.*]] = mul [[TMP12]], shufflevector ( insertelement ( poison, i64 1, i64 0), poison, zeroinitializer) +; IF-EVL-NEXT: [[INDUCTION:%.*]] = add 
zeroinitializer, [[TMP13]] +; IF-EVL-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2 +; IF-EVL-NEXT: [[TMP16:%.*]] = mul i64 1, [[TMP15]] +; IF-EVL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP16]], i64 0 +; IF-EVL-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[TMP17:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[TMP18:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP17]], i32 2, i1 true) +; IF-EVL-NEXT: [[TMP19:%.*]] = icmp ule [[VEC_IND]], [[BROADCAST_SPLAT]] +; IF-EVL-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], [[VEC_IND]] +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.vp.gather.nxv2i64.nxv2p0( align 8 [[TMP20]], [[TMP19]], i32 [[TMP18]]) +; IF-EVL-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], [[WIDE_MASKED_GATHER]] +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call @llvm.vp.gather.nxv2f32.nxv2p0( align 4 [[TMP21]], [[TMP19]], i32 [[TMP18]]) +; IF-EVL-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], [[WIDE_MASKED_GATHER]] +; IF-EVL-NEXT: call void @llvm.vp.scatter.nxv2f32.nxv2p0( [[WIDE_MASKED_GATHER2]], align 4 [[TMP22]], [[TMP19]], i32 [[TMP18]]) +; IF-EVL-NEXT: [[TMP23:%.*]] = zext i32 [[TMP18]] to i64 +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP23]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX1]], [[TMP10]] +; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], [[DOTSPLAT]] +; IF-EVL-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV]] +; IF-EVL-NEXT: [[TMP25:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 +; IF-EVL-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP25]] +; IF-EVL-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +; IF-EVL-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP25]] +; IF-EVL-NEXT: store float [[TMP26]], ptr [[ARRAYIDX7]], align 4 +; IF-EVL-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; IF-EVL: for.end: +; IF-EVL-NEXT: ret void +; 
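The IF-EVL checks above show llvm.vp.gather and llvm.vp.scatter taking their active lane count from llvm.experimental.get.vector.length on every iteration, while the NO-VP run below stays scalar. The kernel both prefixes cover, reconstructed in C-like form from the IR source at the end of the test (the signature and types are simplified for illustration and are not part of the test):

    // Reconstructed shape of the gather_scatter kernel. Under IF-EVL the
    // indexed loads and stores become llvm.vp.gather/llvm.vp.scatter whose
    // lane count is the EVL computed each iteration.
    void gather_scatter(float *in, float *out, long *index, long n) {
      for (long i = 0; i < n; ++i) {
        long j = index[i]; // gathered index
        out[j] = in[j];    // gather from in, scatter to out
      }
    }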
+; NO-VP-LABEL: @gather_scatter( +; NO-VP-NEXT: entry: +; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: for.body: +; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV]] +; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 +; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 +; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]] +; NO-VP-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4 +; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] +; NO-VP: for.end: +; NO-VP-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] + %arrayidx3 = getelementptr inbounds i32, ptr %index, i64 %indvars.iv + %0 = load i64, ptr %arrayidx3, align 8 + %arrayidx5 = getelementptr inbounds float, ptr %in, i64 %0 + %1 = load float, ptr %arrayidx5, align 4 + %arrayidx7 = getelementptr inbounds float, ptr %out, i64 %0 + store float %1, ptr %arrayidx7, align 4 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %exitcond.not = icmp eq i64 %indvars.iv.next, %n + br i1 %exitcond.not, label %for.end, label %for.body + +for.end: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll new file mode 100644 index 0000000000000..0b495bc680f0c --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll @@ -0,0 +1,175 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s + +; FIXME: interleaved accesses are not supported yet with predicated vectorization. 
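As the FIXME notes, EVL-based predication does not yet handle interleaved accesses, and that is visible in the IF-EVL output below: the interleaved pairs are loaded through llvm.masked.gather with compare-based masks, and no EVL-based IV appears in the vector body. The kernel, reconstructed in C-like form (a hypothetical signature, for illustration only):

    // Reconstructed shape of the interleave kernel: the two interleaved
    // fields b[i][0] and b[i][1] are summed into a[i].
    void interleave(int *a, int (*b)[2], long N) {
      for (long i = 0; i < N; ++i)
        a[i] = b[i][0] + b[i][1];
    }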
+define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) { +; IF-EVL-LABEL: @interleave( +; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: [[TMP17:%.*]] = sub i64 -1, [[N:%.*]] +; IF-EVL-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP31]], 8 +; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP17]], [[TMP2]] +; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8 +; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 8 +; IF-EVL-NEXT: [[TMP32:%.*]] = sub i64 [[TMP7]], 1 +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP32]] +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]] +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 +; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 8 +; IF-EVL-NEXT: [[TMP11:%.*]] = call @llvm.experimental.stepvector.nxv4i64() +; IF-EVL-NEXT: [[TMP12:%.*]] = add [[TMP11]], zeroinitializer +; IF-EVL-NEXT: [[TMP13:%.*]] = mul [[TMP12]], shufflevector ( insertelement ( poison, i64 1, i64 0), poison, zeroinitializer) +; IF-EVL-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP13]] +; IF-EVL-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 4 +; IF-EVL-NEXT: [[TMP37:%.*]] = mul i64 1, [[TMP15]] +; IF-EVL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP37]], i64 0 +; IF-EVL-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer +; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0 +; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] +; IF-EVL-NEXT: [[STEP_ADD:%.*]] = add [[VEC_IND]], [[DOTSPLAT]] +; IF-EVL-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 +; IF-EVL-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 4 +; IF-EVL-NEXT: [[TMP38:%.*]] = add i64 [[TMP19]], 0 +; IF-EVL-NEXT: [[TMP39:%.*]] = mul i64 [[TMP38]], 1 +; IF-EVL-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], [[TMP39]] +; IF-EVL-NEXT: [[TMP23:%.*]] = icmp ule [[VEC_IND]], [[BROADCAST_SPLAT]] +; IF-EVL-NEXT: [[TMP24:%.*]] = icmp ule [[STEP_ADD]], [[BROADCAST_SPLAT]] +; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x i32], ptr [[B:%.*]], [[VEC_IND]], i32 0 +; IF-EVL-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], [[STEP_ADD]], i32 0 +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP25]], i32 4, [[TMP23]], poison) +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP26]], i32 4, [[TMP24]], poison) +; IF-EVL-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], [[VEC_IND]], i32 1 +; IF-EVL-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], [[STEP_ADD]], i32 1 +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER3:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP27]], i32 4, 
[[TMP23]], poison) +; IF-EVL-NEXT: [[WIDE_MASKED_GATHER4:%.*]] = call @llvm.masked.gather.nxv4i32.nxv4p0( [[TMP28]], i32 4, [[TMP24]], poison) +; IF-EVL-NEXT: [[TMP29:%.*]] = add nsw [[WIDE_MASKED_GATHER3]], [[WIDE_MASKED_GATHER]] +; IF-EVL-NEXT: [[TMP30:%.*]] = add nsw [[WIDE_MASKED_GATHER4]], [[WIDE_MASKED_GATHER2]] +; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] +; IF-EVL-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 +; IF-EVL-NEXT: [[TMP34:%.*]] = call i64 @llvm.vscale.i64() +; IF-EVL-NEXT: [[TMP35:%.*]] = mul i64 [[TMP34]], 4 +; IF-EVL-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 [[TMP35]] +; IF-EVL-NEXT: call void @llvm.masked.store.nxv4i32.p0( [[TMP29]], ptr [[TMP33]], i32 4, [[TMP23]]) +; IF-EVL-NEXT: call void @llvm.masked.store.nxv4i32.p0( [[TMP30]], ptr [[TMP36]], i32 4, [[TMP24]]) +; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]] +; IF-EVL-NEXT: [[VEC_IND_NEXT]] = add [[STEP_ADD]], [[DOTSPLAT]] +; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 0 +; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1 +; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP21]] +; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 +; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; IF-EVL: for.cond.cleanup: +; IF-EVL-NEXT: ret void +; +; NO-VP-LABEL: @interleave( +; NO-VP-NEXT: entry: +; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 16 +; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +; NO-VP: vector.ph: +; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 16 +; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]] +; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]] +; NO-VP: vector.body: +; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] +; NO-VP-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 +; NO-VP-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 8 +; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B:%.*]], i64 [[TMP10]], i32 0 +; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[TMP1]], i32 0 +; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 +; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0 +; NO-VP-NEXT: [[WIDE_VEC:%.*]] = load <16 x i32>, ptr 
[[TMP4]], align 4 +; NO-VP-NEXT: [[WIDE_VEC1:%.*]] = load <16 x i32>, ptr [[TMP5]], align 4 +; NO-VP-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> +; NO-VP-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <16 x i32> [[WIDE_VEC1]], <16 x i32> poison, <8 x i32> +; NO-VP-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <16 x i32> [[WIDE_VEC]], <16 x i32> poison, <8 x i32> +; NO-VP-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <16 x i32> [[WIDE_VEC1]], <16 x i32> poison, <8 x i32> +; NO-VP-NEXT: [[TMP6:%.*]] = add nsw <8 x i32> [[STRIDED_VEC3]], [[STRIDED_VEC]] +; NO-VP-NEXT: [[TMP7:%.*]] = add nsw <8 x i32> [[STRIDED_VEC4]], [[STRIDED_VEC2]] +; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]] +; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] +; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 0 +; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i32 8 +; NO-VP-NEXT: store <8 x i32> [[TMP6]], ptr [[TMP12]], align 4 +; NO-VP-NEXT: store <8 x i32> [[TMP7]], ptr [[TMP11]], align 4 +; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; NO-VP-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; NO-VP: middle.block: +; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] +; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; NO-VP: scalar.ph: +; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: for.body: +; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 0 +; NO-VP-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; NO-VP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x i32], ptr [[B]], i64 [[IV]], i32 1 +; NO-VP-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 +; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP29]] +; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] +; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 +; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] +; NO-VP: for.cond.cleanup: +; NO-VP-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] + %arrayidx = getelementptr inbounds [2 x i32], ptr %b, i64 %iv, i32 0 + %0 = load i32, ptr %arrayidx, align 4 + %arrayidx2 = getelementptr inbounds [2 x i32], ptr %b, i64 %iv, i32 1 + %1 = load i32, ptr %arrayidx2, align 4 + %add = add nsw i32 %1, %0 + %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv + store i32 %add, ptr %arrayidx4, align 4 + %iv.next = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp eq i64 %iv.next, %N + br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0 + +for.cond.cleanup: + ret void +} + +!0 = distinct !{!0, !1, !2} +!1 = !{!"llvm.loop.interleave.count", i32 2} +!2 = !{!"llvm.loop.vectorize.enable", i1 true} diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll 
b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll new file mode 100644 index 0000000000000..d5ad99f5cff88 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll @@ -0,0 +1,124 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s + +define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) { +; IF-EVL-LABEL: @iv32( +; IF-EVL-NEXT: entry: +; IF-EVL-NEXT: [[TMP19:%.*]] = sub i32 -1, [[N:%.*]] +; IF-EVL-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP2:%.*]] = mul i32 [[TMP1]], 4 +; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i32 [[TMP19]], [[TMP2]] +; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]] +; IF-EVL: vector.ph: +; IF-EVL-NEXT: [[TMP4:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], 4 +; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP7:%.*]] = mul i32 [[TMP6]], 4 +; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1 +; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i32 [[N]], [[TMP8]] +; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N_RND_UP]], [[TMP5]] +; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i32 [[N_RND_UP]], [[N_MOD_VF]] +; IF-EVL-NEXT: [[TMP9:%.*]] = call i32 @llvm.vscale.i32() +; IF-EVL-NEXT: [[TMP10:%.*]] = mul i32 [[TMP9]], 4 +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: vector.body: +; IF-EVL-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[TMP11:%.*]] = sub i32 [[N]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[TMP11]], i32 4, i1 true) +; IF-EVL-NEXT: [[TMP13:%.*]] = add i32 [[EVL_BASED_IV]], 0 +; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[TMP13]] +; IF-EVL-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP14]], i32 0 +; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP15]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer), i32 [[TMP12]]) +; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[TMP13]] +; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 0 +; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0( [[VP_OP_LOAD]], ptr align 4 [[TMP17]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer), i32 [[TMP12]]) +; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i32 [[TMP12]], [[EVL_BASED_IV]] +; IF-EVL-NEXT: [[IV_NEXT]] = add i32 [[IV]], [[TMP10]] +; IF-EVL-NEXT: [[TMP18:%.*]] = icmp eq i32 [[IV_NEXT]], [[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, 
[[ENTRY1:%.*]] ]
+; IF-EVL-NEXT: br label [[FOR_BODY1:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[IV1:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV1]]
+; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV1]]
+; IF-EVL-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4
+; IF-EVL-NEXT: [[IV_NEXT1]] = add nuw nsw i32 [[IV1]], 1
+; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT1]], [[N]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
+; IF-EVL: for.cond.cleanup:
+; IF-EVL-NEXT: ret void
+;
+; NO-VP-LABEL: @iv32(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i32 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N:%.*]], [[TMP10]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
+; NO-VP-NEXT: [[TMP11:%.*]] = mul i32 [[TMP1]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP11]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul i32 [[TMP2]], 4
+; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
+; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[TMP3]]
+; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[TMP3]]
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
+; NO-VP-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP7]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV]]
+; NO-VP-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV]]
+; NO-VP-NEXT: store i32 [[TMP9]], ptr [[ARRAYIDX4]], align 4
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; NO-VP: for.cond.cleanup:
+; NO-VP-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %b, i32 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %arrayidx4 = getelementptr inbounds i32, ptr %a, i32 %iv
+ store i32 %0, ptr %arrayidx4, align 4
+ %iv.next = add nuw nsw i32 %iv, 1
+ %exitcond.not = icmp eq i32 %iv.next, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll
new file mode 100644
index 0000000000000..203d0c977074e
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll
@@ -0,0 +1,132 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes=loop-vectorize \
+; RUN: -force-tail-folding-style=data-with-evl \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL
+
+; RUN: opt -passes=loop-vectorize \
+; RUN: -force-tail-folding-style=none \
+; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP
+
+define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
+; IF-EVL-LABEL: @masked_loadstore(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: [[TMP0:%.*]] = sub i64 -1, [[N:%.*]]
+; IF-EVL-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 4
+; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
+; IF-EVL-NEXT: br i1 [[TMP3]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; IF-EVL-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
+; IF-EVL-NEXT: [[TMP8:%.*]] = sub i64 [[TMP7]], 1
+; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N]], [[TMP8]]
+; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP5]]
+; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
+; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[EVL_BASED_IV:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[TMP11:%.*]] = sub i64 [[N]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[TMP11]], i32 4, i1 true)
+; IF-EVL-NEXT: [[TMP13:%.*]] = add i64 [[EVL_BASED_IV]], 0
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EVL_BASED_IV]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
+; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP15]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP13]]
+; IF-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 0
+; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP18]], <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), i32 [[TMP12]])
+; IF-EVL-NEXT: [[TMP19:%.*]] = icmp ne <vscale x 4 x i32> [[VP_OP_LOAD]], zeroinitializer
+; IF-EVL-NEXT: [[TMP20:%.*]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i1> [[TMP19]], <vscale x 4 x i1> zeroinitializer
+; IF-EVL-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[TMP13]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP21]], i32 0
+; IF-EVL-NEXT: [[VP_OP_LOAD3:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP22]], <vscale x 4 x i1> [[TMP20]], i32 [[TMP12]])
+; IF-EVL-NEXT: [[TMP23:%.*]] = add <vscale x 4 x i32> [[VP_OP_LOAD]], [[VP_OP_LOAD3]]
+; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP23]], ptr align 4 [[TMP22]], <vscale x 4 x i1> [[TMP20]], i32 [[TMP12]])
+; IF-EVL-NEXT: [[TMP24:%.*]] = zext i32 [[TMP12]] to i64
+; IF-EVL-NEXT: [[INDEX_EVL_NEXT]] = add i64 [[TMP24]], [[EVL_BASED_IV]]
+; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP10]]
+; IF-EVL-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; IF-EVL: middle.block:
+; IF-EVL-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]]
+; IF-EVL: scalar.ph:
+; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
+; IF-EVL: for.body:
+; IF-EVL-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_011]]
+; IF-EVL-NEXT: [[TMP26:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP26]], 0
+; IF-EVL-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
+; IF-EVL: if.then:
+; IF-EVL-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_011]]
+; IF-EVL-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
+; IF-EVL-NEXT: [[ADD:%.*]] = add i32 [[TMP26]], [[TMP27]]
+; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4
+; IF-EVL-NEXT: br label [[FOR_INC]]
+; IF-EVL: for.inc:
+; IF-EVL-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1
+; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; IF-EVL: exit:
+; IF-EVL-NEXT: ret void
+;
+; NO-VP-LABEL: @masked_loadstore(
+; NO-VP-NEXT: entry:
+; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[I_011]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP0]], 0
+; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
+; NO-VP: if.then:
+; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_011]]
+; NO-VP-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
+; NO-VP-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]]
+; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4
+; NO-VP-NEXT: br label [[FOR_INC]]
+; NO-VP: for.inc:
+; NO-VP-NEXT:
[[INC]] = add nuw nsw i64 [[I_011]], 1 +; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N:%.*]] +; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[FOR_BODY]] +; NO-VP: exit: +; NO-VP-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %i.011 = phi i64 [ %inc, %for.inc ], [ 0, %entry ] + %arrayidx = getelementptr inbounds i32, ptr %b, i64 %i.011 + %0 = load i32, ptr %arrayidx, align 4 + %cmp1 = icmp ne i32 %0, 0 + br i1 %cmp1, label %if.then, label %for.inc + +if.then: + %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %i.011 + %1 = load i32, ptr %arrayidx3, align 4 + %add = add i32 %0, %1 + store i32 %add, ptr %arrayidx3, align 4 + br label %for.inc + +for.inc: + %inc = add nuw nsw i64 %i.011, 1 + %exitcond.not = icmp eq i64 %inc, %n + br i1 %exitcond.not, label %exit, label %for.body + +exit: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-no-masking.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll similarity index 84% rename from llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-no-masking.ll rename to llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll index ed736602565e8..1c49fba1370e9 100644 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-no-masking.ll +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll @@ -2,12 +2,12 @@ ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-tail-folding-style=data-with-evl \ ; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s ; RUN: opt -passes=loop-vectorize \ ; RUN: -force-tail-folding-style=none \ ; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s ; No need to emit predicated vector code if the vector instructions with masking are not required. define i32 @no_masking() { diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll new file mode 100644 index 0000000000000..f2222e0a1f936 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll @@ -0,0 +1,119 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=data-with-evl \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL + +; RUN: opt -passes=loop-vectorize \ +; RUN: -force-tail-folding-style=none \ +; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ +; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP + +; FIXME: reversed loads/stores are not supported yet with predicated vectorization. 
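+;
+; Until that support lands, the IF-EVL checks below show the fallback: the
+; loop is still tail-folded, but the reversed load and store keep the generic
+; header mask (icmp ule VEC_IV, 1023) and are emitted as @llvm.masked.load
+; and @llvm.masked.store combined with @llvm.experimental.vector.reverse,
+; rather than as @llvm.vp.* operations driven by the EVL returned from
+; @llvm.experimental.get.vector.length.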
+define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %ptr2) {
+; IF-EVL-LABEL: @reverse_load_store(
+; IF-EVL-NEXT: entry:
+; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; IF-EVL: vector.ph:
+; IF-EVL-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; IF-EVL-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1
+; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 1024, [[TMP4]]
+; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]]
+; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; IF-EVL-NEXT: [[IND_END:%.*]] = sub i64 [[STARTVAL:%.*]], [[N_VEC]]
+; IF-EVL-NEXT: [[IND_END1:%.*]] = trunc i64 [[N_VEC]] to i32
+; IF-EVL-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
+; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]]
+; IF-EVL: vector.body:
+; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL]], [[INDEX]]
+; IF-EVL-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 0
+; IF-EVL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[INDEX]], i64 0
+; IF-EVL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+; IF-EVL-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; IF-EVL-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
+; IF-EVL-NEXT: [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP9]]
+; IF-EVL-NEXT: [[TMP10:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1023, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
+; IF-EVL-NEXT: [[TMP11:%.*]] = add i64 [[TMP7]], -1
+; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[TMP11]]
+; IF-EVL-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 4
+; IF-EVL-NEXT: [[TMP15:%.*]] = mul i64 0, [[TMP14]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = sub i64 1, [[TMP14]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 [[TMP15]]
+; IF-EVL-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 [[TMP16]]
+; IF-EVL-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]])
+; IF-EVL-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP18]], i32 4, <vscale x 4 x i1> [[REVERSE]], <vscale x 4 x i32> poison)
+; IF-EVL-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]])
+; IF-EVL-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP11]]
+; IF-EVL-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 4
+; IF-EVL-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP21]]
+; IF-EVL-NEXT: [[TMP23:%.*]] = sub i64 1, [[TMP21]]
+; IF-EVL-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP22]]
+; IF-EVL-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP23]]
+; IF-EVL-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]])
+; IF-EVL-NEXT: [[REVERSE5:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE3]])
+; IF-EVL-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[REVERSE5]], ptr [[TMP25]], i32 4, <vscale x 4 x i1> [[REVERSE4]])
+; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
+; IF-EVL-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]],
[[N_VEC]] +; IF-EVL-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; IF-EVL: middle.block: +; IF-EVL-NEXT: br i1 true, label [[LOOPEND:%.*]], label [[SCALAR_PH]] +; IF-EVL: scalar.ph: +; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[STARTVAL]], [[ENTRY:%.*]] ] +; IF-EVL-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] +; IF-EVL: for.body: +; IF-EVL-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; IF-EVL-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 +; IF-EVL-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]] +; IF-EVL-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 +; IF-EVL-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]] +; IF-EVL-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 +; IF-EVL-NEXT: [[INC]] = add i32 [[I]], 1 +; IF-EVL-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 +; IF-EVL-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND]], !llvm.loop [[LOOP3:![0-9]+]] +; IF-EVL: loopend: +; IF-EVL-NEXT: ret void +; +; NO-VP-LABEL: @reverse_load_store( +; NO-VP-NEXT: entry: +; NO-VP-NEXT: br label [[FOR_BODY:%.*]] +; NO-VP: for.body: +; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL:%.*]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; NO-VP-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 +; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[ADD]] +; NO-VP-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 +; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[ADD]] +; NO-VP-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 +; NO-VP-NEXT: [[INC]] = add i32 [[I]], 1 +; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 +; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND:%.*]] +; NO-VP: loopend: +; NO-VP-NEXT: ret void +; +entry: + br label %for.body + +for.body: + %add.phi = phi i64 [ %startval, %entry ], [ %add, %for.body ] + %i = phi i32 [ 0, %entry ], [ %inc, %for.body ] + %add = add i64 %add.phi, -1 + %gepl = getelementptr inbounds i32, ptr %ptr, i64 %add + %tmp = load i32, ptr %gepl, align 4 + %geps = getelementptr inbounds i32, ptr %ptr2, i64 %add + store i32 %tmp, ptr %geps, align 4 + %inc = add i32 %i, 1 + %exitcond = icmp ne i32 %inc, 1024 + br i1 %exitcond, label %for.body, label %loopend + +loopend: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll similarity index 100% rename from llvm/test/Transforms/LoopVectorize/X86/vectorize-vp-intrinsics.ll rename to llvm/test/Transforms/LoopVectorize/X86/vectorize-force-tail-with-evl.ll diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll similarity index 100% rename from llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics.ll rename to llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll deleted 
file mode 100644 index 98d2938523ce4..0000000000000 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-gather-scatter.ll +++ /dev/null @@ -1,48 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s - -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s - -; The target does not support predicated vectorization. -define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) { -; CHECK-LABEL: @gather_scatter( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8 -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4 -; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]] -; CHECK-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4 -; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]] -; CHECK: for.end: -; CHECK-NEXT: ret void -; -entry: - br label %for.body - -for.body: - %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] - %arrayidx3 = getelementptr inbounds i32, ptr %index, i64 %indvars.iv - %0 = load i64, ptr %arrayidx3, align 8 - %arrayidx5 = getelementptr inbounds float, ptr %in, i64 %0 - %1 = load float, ptr %arrayidx5, align 4 - %arrayidx7 = getelementptr inbounds float, ptr %out, i64 %0 - store float %1, ptr %arrayidx7, align 4 - %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 - %exitcond.not = icmp eq i64 %indvars.iv.next, %n - br i1 %exitcond.not, label %for.end, label %for.body - -for.end: - ret void -} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll deleted file mode 100644 index fa7c0c472f70b..0000000000000 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-interleave.ll +++ /dev/null @@ -1,168 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=IF-EVL %s - -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s - -; FIXME: interleaved accesses are not supported yet with predicated vectorization. 
-define void @interleave(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) { -; IF-EVL-LABEL: @interleave( -; IF-EVL-NEXT: entry: -; IF-EVL-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] -; IF-EVL: vector.ph: -; IF-EVL-NEXT: [[N_RND_UP:%.*]] = add i64 [[N:%.*]], 1 -; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 2 -; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] -; IF-EVL-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1 -; IF-EVL-NEXT: br label [[VECTOR_BODY:%.*]] -; IF-EVL: vector.body: -; IF-EVL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ] -; IF-EVL-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; IF-EVL-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 -; IF-EVL-NEXT: [[TMP2:%.*]] = icmp ule i64 [[TMP0]], [[TRIP_COUNT_MINUS_1]] -; IF-EVL-NEXT: [[TMP3:%.*]] = icmp ule i64 [[TMP1]], [[TRIP_COUNT_MINUS_1]] -; IF-EVL-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] -; IF-EVL: pred.store.if: -; IF-EVL-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; IF-EVL-NEXT: [[TMP5:%.*]] = load i32, ptr [[TMP4]], align 4 -; IF-EVL-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP0]] -; IF-EVL-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4 -; IF-EVL-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] -; IF-EVL-NEXT: [[TMP9:%.*]] = add nsw i32 [[TMP7]], [[TMP5]] -; IF-EVL-NEXT: store i32 [[TMP9]], ptr [[TMP8]], align 4 -; IF-EVL-NEXT: br label [[PRED_STORE_CONTINUE]] -; IF-EVL: pred.store.continue: -; IF-EVL-NEXT: [[TMP10:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP5]], [[PRED_STORE_IF]] ] -; IF-EVL-NEXT: [[TMP11:%.*]] = phi i32 [ poison, [[VECTOR_BODY]] ], [ [[TMP7]], [[PRED_STORE_IF]] ] -; IF-EVL-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]] -; IF-EVL: pred.store.if1: -; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]] -; IF-EVL-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP12]], align 4 -; IF-EVL-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP1]] -; IF-EVL-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4 -; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] -; IF-EVL-NEXT: [[TMP17:%.*]] = add nsw i32 [[TMP15]], [[TMP13]] -; IF-EVL-NEXT: store i32 [[TMP17]], ptr [[TMP16]], align 4 -; IF-EVL-NEXT: br label [[PRED_STORE_CONTINUE2]] -; IF-EVL: pred.store.continue2: -; IF-EVL-NEXT: [[TMP18:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP13]], [[PRED_STORE_IF1]] ] -; IF-EVL-NEXT: [[TMP19:%.*]] = phi i32 [ poison, [[PRED_STORE_CONTINUE]] ], [ [[TMP15]], [[PRED_STORE_IF1]] ] -; IF-EVL-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 -; IF-EVL-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; IF-EVL-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; IF-EVL: middle.block: -; IF-EVL-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] -; IF-EVL: scalar.ph: -; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; IF-EVL-NEXT: br label [[FOR_BODY:%.*]] -; IF-EVL: for.body: -; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; IF-EVL-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX]], align 
4
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
-; IF-EVL-NEXT: [[TMP22:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP22]], [[TMP21]]
-; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
-; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
-; IF-EVL: for.cond.cleanup:
-; IF-EVL-NEXT: ret void
-;
-; NO-VP-LABEL: @interleave(
-; NO-VP-NEXT: entry:
-; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
-; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; NO-VP: vector.ph:
-; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
-; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
-; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
-; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
-; NO-VP: vector.body:
-; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0
-; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 0
-; NO-VP-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 1
-; NO-VP-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], [[TMP9]]
-; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP6]]
-; NO-VP-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP10]]
-; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0
-; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i64 [[TMP14]]
-; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP13]], align 4
-; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 1 x i32>, ptr [[TMP15]], align 4
-; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP6]]
-; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[TMP10]]
-; NO-VP-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 0
-; NO-VP-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i64 [[TMP19]]
-; NO-VP-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 1 x i32>, ptr [[TMP18]], align 4
-; NO-VP-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 1 x i32>, ptr [[TMP20]], align 4
-; NO-VP-NEXT: [[TMP21:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD2]], [[WIDE_LOAD]]
-; NO-VP-NEXT: [[TMP22:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD3]], [[WIDE_LOAD1]]
-; NO-VP-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP6]]
-; NO-VP-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP10]]
-; NO-VP-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i32 0
-; NO-VP-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i64 [[TMP26]]
-; NO-VP-NEXT: store <vscale x 1 x i32> [[TMP21]], ptr [[TMP25]], align 4
-; NO-VP-NEXT: store <vscale x 1 x i32> [[TMP22]], ptr [[TMP27]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
-; NO-VP-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; NO-VP-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] -; NO-VP: middle.block: -; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] -; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] -; NO-VP: scalar.ph: -; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; NO-VP-NEXT: br label [[FOR_BODY:%.*]] -; NO-VP: for.body: -; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ] -; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]] -; NO-VP-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; NO-VP-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]] -; NO-VP-NEXT: [[TMP30:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4 -; NO-VP-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], [[TMP29]] -; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]] -; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX4]], align 4 -; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 -; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]] -; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] -; NO-VP: for.cond.cleanup: -; NO-VP-NEXT: ret void -; -entry: - br label %for.body - -for.body: - %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ] - %arrayidx = getelementptr inbounds i32, ptr %b, i64 %iv - %0 = load i32, ptr %arrayidx, align 4 - %arrayidx2 = getelementptr inbounds i32, ptr %c, i64 %iv - %1 = load i32, ptr %arrayidx2, align 4 - %add = add nsw i32 %1, %0 - %arrayidx4 = getelementptr inbounds i32, ptr %a, i64 %iv - store i32 %add, ptr %arrayidx4, align 4 - %iv.next = add nuw nsw i64 %iv, 1 - %exitcond.not = icmp eq i64 %iv.next, %N - br i1 %exitcond.not, label %for.cond.cleanup, label %for.body, !llvm.loop !0 - -for.cond.cleanup: - ret void -} - -!0 = distinct !{!0, !1, !2} -!1 = !{!"llvm.loop.interleave.count", i32 2} -!2 = !{!"llvm.loop.vectorize.enable", i1 true} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll deleted file mode 100644 index 511969ed972fb..0000000000000 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-iv32.ll +++ /dev/null @@ -1,85 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=IF-EVL %s - -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s - -; The target does not support predicated vectorization. 
-define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
-; IF-EVL-LABEL: @iv32(
-; IF-EVL-NEXT: entry:
-; IF-EVL-NEXT: br label [[FOR_BODY:%.*]]
-; IF-EVL: for.body:
-; IF-EVL-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[IV]]
-; IF-EVL-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; IF-EVL-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[IV]]
-; IF-EVL-NEXT: store i32 [[TMP0]], ptr [[ARRAYIDX4]], align 4
-; IF-EVL-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N:%.*]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
-; IF-EVL: for.cond.cleanup:
-; IF-EVL-NEXT: ret void
-;
-; NO-VP-LABEL: @iv32(
-; NO-VP-NEXT: entry:
-; NO-VP-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
-; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N:%.*]], [[TMP0]]
-; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; NO-VP: vector.ph:
-; NO-VP-NEXT: [[TMP1:%.*]] = call i32 @llvm.vscale.i32()
-; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], [[TMP1]]
-; NO-VP-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
-; NO-VP-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
-; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
-; NO-VP: vector.body:
-; NO-VP-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; NO-VP-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 0
-; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i32 [[TMP3]]
-; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
-; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4
-; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[TMP3]]
-; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
-; NO-VP-NEXT: store <vscale x 1 x i32> [[WIDE_LOAD]], ptr [[TMP7]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP2]]
-; NO-VP-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; NO-VP-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; NO-VP: middle.block:
-; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
-; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
-; NO-VP: scalar.ph:
-; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
-; NO-VP: for.body:
-; NO-VP-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i32 [[IV]]
-; NO-VP-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; NO-VP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, ptr [[A]], i32 [[IV]]
-; NO-VP-NEXT: store i32 [[TMP9]], ptr [[ARRAYIDX4]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i32 [[IV]], 1
-; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
-; NO-VP: for.cond.cleanup:
-; NO-VP-NEXT: ret void
-;
-entry:
- br label %for.body
-
-for.body:
- %iv = phi i32 [ 0, %entry ], [ %iv.next, %for.body ]
- %arrayidx = getelementptr inbounds i32, ptr %b, i32 %iv
- %0 = load i32, ptr %arrayidx, align 4
- %arrayidx4 =
getelementptr inbounds i32, ptr %a, i32 %iv - store i32 %0, ptr %arrayidx4, align 4 - %iv.next = add nuw nsw i32 %iv, 1 - %exitcond.not = icmp eq i32 %iv.next, %N - br i1 %exitcond.not, label %for.cond.cleanup, label %for.body - -for.cond.cleanup: - ret void -} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll deleted file mode 100644 index efa45a3374681..0000000000000 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-masked-loadstore.ll +++ /dev/null @@ -1,60 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s - -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s - -; The target does not support predicated vectorization. -define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) { -; CHECK-LABEL: @masked_loadstore( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[I_011]] -; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP0]], 0 -; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]] -; CHECK: if.then: -; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_011]] -; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]] -; CHECK-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4 -; CHECK-NEXT: br label [[FOR_INC]] -; CHECK: for.inc: -; CHECK-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1 -; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N:%.*]] -; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[FOR_BODY]] -; CHECK: exit: -; CHECK-NEXT: ret void -; -entry: - br label %for.body - -for.body: - %i.011 = phi i64 [ %inc, %for.inc ], [ 0, %entry ] - %arrayidx = getelementptr inbounds i32, ptr %b, i64 %i.011 - %0 = load i32, ptr %arrayidx, align 4 - %cmp1 = icmp ne i32 %0, 0 - br i1 %cmp1, label %if.then, label %for.inc - -if.then: - %arrayidx3 = getelementptr inbounds i32, ptr %a, i64 %i.011 - %1 = load i32, ptr %arrayidx3, align 4 - %add = add i32 %0, %1 - store i32 %add, ptr %arrayidx3, align 4 - br label %for.inc - -for.inc: - %inc = add nuw nsw i64 %i.011, 1 - %exitcond.not = icmp eq i64 %inc, %n - br i1 %exitcond.not, label %exit, label %for.body - -exit: - ret void -} diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll deleted file mode 100644 index f455cf633ff7c..0000000000000 --- a/llvm/test/Transforms/LoopVectorize/vectorize-vp-intrinsics-reverse-load-store.ll +++ /dev/null @@ -1,49 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=data-with-evl \ 
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s - -; RUN: opt -passes=loop-vectorize \ -; RUN: -force-tail-folding-style=none \ -; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \ -; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck %s - -; FIXME: reversed loads/stores are not supported yet with predicated vectorization. - -define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %ptr2) { -; CHECK-LABEL: @reverse_load_store( -; CHECK-NEXT: entry: -; CHECK-NEXT: br label [[FOR_BODY:%.*]] -; CHECK: for.body: -; CHECK-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL:%.*]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; CHECK-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1 -; CHECK-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[ADD]] -; CHECK-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4 -; CHECK-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[ADD]] -; CHECK-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4 -; CHECK-NEXT: [[INC]] = add i32 [[I]], 1 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND:%.*]] -; CHECK: loopend: -; CHECK-NEXT: ret void -; -entry: - br label %for.body - -for.body: - %add.phi = phi i64 [ %startval, %entry ], [ %add, %for.body ] - %i = phi i32 [ 0, %entry ], [ %inc, %for.body ] - %add = add i64 %add.phi, -1 - %gepl = getelementptr inbounds i32, ptr %ptr, i64 %add - %tmp = load i32, ptr %gepl, align 4 - %geps = getelementptr inbounds i32, ptr %ptr2, i64 %add - store i32 %tmp, ptr %geps, align 4 - %inc = add i32 %i, 1 - %exitcond = icmp ne i32 %inc, 1024 - br i1 %exitcond, label %for.body, label %loopend - -loopend: - ret void -} diff --git a/llvm/test/Transforms/LoopVectorize/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/vplan-force-tail-with-evl.ll similarity index 100% rename from llvm/test/Transforms/LoopVectorize/vplan-vp-intrinsics.ll rename to llvm/test/Transforms/LoopVectorize/vplan-force-tail-with-evl.ll From 4d1622de96137975b60e4f0beebd9e3a87a428eb Mon Sep 17 00:00:00 2001 From: Alexey Bataev Date: Mon, 1 Apr 2024 07:13:15 -0700 Subject: [PATCH 16/17] Address comments --- .../Transforms/Vectorize/LoopVectorize.cpp | 48 +++++++++---------- llvm/lib/Transforms/Vectorize/VPlan.h | 19 +++----- .../lib/Transforms/Vectorize/VPlanRecipes.cpp | 2 +- .../vectorize-force-tail-with-evl.ll | 19 ++++---- .../vplan-force-tail-with-evl.ll | 2 +- 5 files changed, 42 insertions(+), 48 deletions(-) diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 2cd1dc9e3f7da..cb0fd06554e6c 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1508,13 +1508,10 @@ class LoopVectorizationCostModel { /// Returns the TailFoldingStyle that is best for the current loop. TailFoldingStyle getTailFoldingStyle(bool IVUpdateMayOverflow = true) const { - if (!ChosenTailFoldingStyle.first) { - assert(!ChosenTailFoldingStyle.second && - "Chosen tail folding style must not be set."); + if (!ChosenTailFoldingStyle) return TailFoldingStyle::None; - } - return *(IVUpdateMayOverflow ? 
ChosenTailFoldingStyle.first
-                               : ChosenTailFoldingStyle.second);
+    return IVUpdateMayOverflow ? ChosenTailFoldingStyle->first
+                               : ChosenTailFoldingStyle->second;
   }
 
   /// Selects and saves TailFoldingStyle for 2 options - if IV update may
   /// overflow or not.
   /// \param IsScalableVF true if scalable vector factors enabled.
   /// \param UserIC User specific interleave count.
   void setTailFoldingStyles(bool IsScalableVF, unsigned UserIC) {
-    assert(!ChosenTailFoldingStyle.first && !ChosenTailFoldingStyle.second &&
-           "Tail folding must not be selected yet.");
+    assert(!ChosenTailFoldingStyle && "Tail folding must not be selected yet.");
     if (!Legal->prepareToFoldTailByMasking()) {
-      ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second =
-          TailFoldingStyle::None;
+      ChosenTailFoldingStyle =
+          std::make_pair(TailFoldingStyle::None, TailFoldingStyle::None);
       return;
     }
 
     if (!ForceTailFoldingStyle.getNumOccurrences()) {
-      ChosenTailFoldingStyle.first =
-          TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true);
-      ChosenTailFoldingStyle.second =
-          TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false);
+      ChosenTailFoldingStyle = std::make_pair(
+          TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/true),
+          TTI.getPreferredTailFoldingStyle(/*IVUpdateMayOverflow=*/false));
       return;
     }
 
     // Set styles when forced.
-    ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second =
-        ForceTailFoldingStyle;
+    ChosenTailFoldingStyle = std::make_pair(ForceTailFoldingStyle.getValue(),
+                                            ForceTailFoldingStyle.getValue());
     if (ForceTailFoldingStyle != TailFoldingStyle::DataWithEVL)
       return;
     // Override forced styles if needed.
@@ -1558,8 +1553,9 @@ class LoopVectorizationCostModel {
       // If for some reason EVL mode is unsupported, fallback to
       // DataWithoutLaneMask to try to vectorize the loop with folded tail
      // in a generic way.
-      ChosenTailFoldingStyle.first = ChosenTailFoldingStyle.second =
-          TailFoldingStyle::DataWithoutLaneMask;
+      ChosenTailFoldingStyle =
+          std::make_pair(TailFoldingStyle::DataWithoutLaneMask,
+                         TailFoldingStyle::DataWithoutLaneMask);
       LLVM_DEBUG(
           dbgs() << "LV: Preference for VP intrinsics indicated. Will "
@@ -1740,7 +1736,7 @@ class LoopVectorizationCostModel {
 
   /// Control finally chosen tail folding style. The first element is used if
   /// the IV update may overflow, the second element - if it does not.
-  std::pair<std::optional<TailFoldingStyle>, std::optional<TailFoldingStyle>>
+  std::optional<std::pair<TailFoldingStyle, TailFoldingStyle>>
       ChosenTailFoldingStyle;
 
   /// A map holding scalar costs for different vectorization factors. The
@@ -9743,7 +9739,7 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
   }
 
   // The scalar cost should only be 0 when vectorizing with a user specified
   // VF/IC. In those cases, runtime checks should always be generated.
-  uint64_t ScalarC = *VF.ScalarCost.getValue();
+  double ScalarC = *VF.ScalarCost.getValue();
   if (ScalarC == 0)
     return true;
 
@@ -9770,7 +9766,7 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks,
   //  RtC + VecC * (TC / VF) + EpiC < ScalarC * TC
   //
   // Now we can compute the minimum required trip count TC as
-  //   VF * (RtC + EpiC) / (ScalarC * VF - VecC) < TC
+  //   (RtC + EpiC) / (ScalarC - (VecC / VF)) < TC
   //
   // For now we assume the epilogue cost EpiC = 0 for simplicity.
Note that // the computations are performed on doubles, not integers and the result @@ -9782,9 +9778,9 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, AssumedMinimumVscale = *VScale; IntVF *= AssumedMinimumVscale; } - uint64_t RtC = *CheckCost.getValue(); - uint64_t Div = ScalarC * IntVF - *VF.Cost.getValue(); - uint64_t MinTC1 = Div == 0 ? 0 : divideCeil(RtC * IntVF, Div); + double VecCOverVF = double(*VF.Cost.getValue()) / IntVF; + double RtC = *CheckCost.getValue(); + double MinTC1 = RtC / (ScalarC - VecCOverVF); // Second, compute a minimum iteration count so that the cost of the // runtime checks is only a fraction of the total scalar loop cost. This @@ -9793,12 +9789,12 @@ static bool areRuntimeChecksProfitable(GeneratedRTChecks &Checks, // * TC. To bound the runtime check to be a fraction 1/X of the scalar // cost, compute // RtC < ScalarC * TC * (1 / X) ==> RtC * X / ScalarC < TC - uint64_t MinTC2 = divideCeil(RtC * 10, ScalarC); + double MinTC2 = RtC * 10 / ScalarC; // Now pick the larger minimum. If it is not a multiple of VF and a scalar // epilogue is allowed, choose the next closest multiple of VF. This should // partly compensate for ignoring the epilogue cost. - uint64_t MinTC = std::max(MinTC1, MinTC2); + uint64_t MinTC = std::ceil(std::max(MinTC1, MinTC2)); if (SEL == CM_ScalarEpilogueAllowed) MinTC = alignTo(MinTC, IntVF); VF.MinProfitableTripCount = ElementCount::getFixed(MinTC); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index 1ef759dea3c0b..3a2965fcb6bde 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -2572,8 +2572,7 @@ class VPWidenCanonicalIVRecipe : public VPSingleDefRecipe { }; /// A recipe for converting the canonical IV value to the corresponding value of -/// an IV with different start and step values, using Start + CanonicalIV * -/// Step. +/// an IV with different start and step values, using Start + IV * Step. class VPDerivedIVRecipe : public VPSingleDefRecipe { /// Kind of the induction. const InductionDescriptor::InductionKind Kind; @@ -2581,10 +2580,6 @@ class VPDerivedIVRecipe : public VPSingleDefRecipe { /// for floating point inductions. 
const FPMathOperator *FPBinOp;
 
-  VPCanonicalIVPHIRecipe *getCanonicalIV() const {
-    return cast<VPCanonicalIVPHIRecipe>(getOperand(1));
-  }
-
 public:
   VPDerivedIVRecipe(const InductionDescriptor &IndDesc, VPValue *Start,
                     VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step)
@@ -2594,16 +2589,16 @@ class VPDerivedIVRecipe : public VPSingleDefRecipe {
                           Start, CanonicalIV, Step) {}
 
   VPDerivedIVRecipe(InductionDescriptor::InductionKind Kind,
-                    const FPMathOperator *FPBinOp, VPValue *Start,
-                    VPCanonicalIVPHIRecipe *CanonicalIV, VPValue *Step)
-      : VPSingleDefRecipe(VPDef::VPDerivedIVSC, {Start, CanonicalIV, Step}),
-        Kind(Kind), FPBinOp(FPBinOp) {}
+                    const FPMathOperator *FPBinOp, VPValue *Start, VPValue *IV,
+                    VPValue *Step)
+      : VPSingleDefRecipe(VPDef::VPDerivedIVSC, {Start, IV, Step}), Kind(Kind),
+        FPBinOp(FPBinOp) {}
 
   ~VPDerivedIVRecipe() override = default;
 
   VPRecipeBase *clone() override {
-    return new VPDerivedIVRecipe(Kind, FPBinOp, getStartValue(),
-                                 getCanonicalIV(), getStepValue());
+    return new VPDerivedIVRecipe(Kind, FPBinOp, getStartValue(), getOperand(1),
+                                 getStepValue());
   }
 
   VP_CLASSOF_IMPL(VPDef::VPDerivedIVSC)
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index f03caffcf656c..1be0287ce7c9e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1216,7 +1216,7 @@ void VPDerivedIVRecipe::print(raw_ostream &O, const Twine &Indent,
   O << Indent << "= DERIVED-IV ";
   getStartValue()->printAsOperand(O, SlotTracker);
   O << " + ";
-  getCanonicalIV()->printAsOperand(O, SlotTracker);
+  getOperand(1)->printAsOperand(O, SlotTracker);
   O << " * ";
   getStepValue()->printAsOperand(O, SlotTracker);
 }
diff --git a/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
index 929889045dbbd..a90b38c6a9605 100644
--- a/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/vectorize-force-tail-with-evl.ll
@@ -6,7 +6,7 @@
 
 ; RUN: opt -passes=loop-vectorize \
 ; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -force-vector-width=4 \
 ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on -S < %s | FileCheck --check-prefix=NO-VP %s
 
 ; The target does not support predicated vectorization.
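;
; (The hunk below is mechanical fallout of the new -force-vector-width=4 flag:
; the chosen VF changes from vscale x 1 to vscale x 4, so the wide loads and
; stores become <vscale x 4 x i32> and the induction step gains a mul by 4.)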
@@ -32,28 +32,31 @@ define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
 ; NO-VP-LABEL: @foo(
 ; NO-VP-NEXT: entry:
 ; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP0]]
+; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP8]]
 ; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; NO-VP: vector.ph:
 ; NO-VP-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP1]]
+; NO-VP-NEXT: [[TMP14:%.*]] = mul i64 [[TMP1]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP14]]
 ; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP15:%.*]] = mul i64 [[TMP2]], 4
 ; NO-VP-NEXT: br label [[VECTOR_BODY:%.*]]
 ; NO-VP: vector.body:
 ; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; NO-VP-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
 ; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP3]]
 ; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0
-; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP5]], align 4
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
 ; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[C:%.*]], i64 [[TMP3]]
 ; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[TMP6]], i32 0
-; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 1 x i32>, ptr [[TMP7]], align 4
-; NO-VP-NEXT: [[TMP8:%.*]] = add nsw <vscale x 1 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
+; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 4
+; NO-VP-NEXT: [[TMP16:%.*]] = add nsw <vscale x 4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
 ; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP3]]
 ; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0
-; NO-VP-NEXT: store <vscale x 1 x i32> [[TMP8]], ptr [[TMP10]], align 4
-; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP16]], ptr [[TMP10]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP15]]
 ; NO-VP-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; NO-VP-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; NO-VP: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-force-tail-with-evl.ll b/llvm/test/Transforms/LoopVectorize/vplan-force-tail-with-evl.ll
index 30e0f055acfc3..f510d47d06e36 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-force-tail-with-evl.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-force-tail-with-evl.ll
@@ -1,7 +1,7 @@
 ; REQUIRES: asserts
 ; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
+; RUN: -force-tail-folding-style=data-with-evl -force-vector-width=4 \
 ; RUN: -force-target-supports-scalable-vectors -scalable-vectorization=on \
 ; RUN: -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP %s

From 635a8939d8f98fc98c0a92e074f8f37d8f2b14a1 Mon Sep 17 00:00:00 2001
From: Alexey Bataev
Date: Thu, 4 Apr 2024 06:45:51 -0700
Subject: [PATCH 17/17] Address comments

---
 llvm/lib/Transforms/Vectorize/VPlan.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 3a2965fcb6bde..77577b516ae27 100644
---
a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2571,8 +2571,9 @@ class VPWidenCanonicalIVRecipe : public VPSingleDefRecipe {
   }
 };
 
-/// A recipe for converting the canonical IV value to the corresponding value of
-/// an IV with different start and step values, using Start + IV * Step.
+/// A recipe for converting the input value \p IV to the corresponding
+/// value of an IV with different start and step values, using Start + IV *
+/// Step.
 class VPDerivedIVRecipe : public VPSingleDefRecipe {
   /// Kind of the induction.
   const InductionDescriptor::InductionKind Kind;
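
A note on the two changes above, with standalone sketches (illustrative code
only, not LLVM source; all names are assumptions):

PATCH 17 only reflects that VPDerivedIVRecipe now accepts any IV operand, not
just the canonical IV (after the EVL transform the operand can be the
EVL-based IV), while its semantics stay the same: it prints as
"= DERIVED-IV Start + IV * Step" and computes exactly that affine value. A
minimal scalar model:

  // Sketch: the affine value a VPDerivedIVRecipe produces per iteration.
  // The FP-induction variant applies the same formula with FP operations.
  int64_t derivedIV(int64_t Start, int64_t IV, int64_t Step) {
    return Start + IV * Step;
  }

The floating-point rewrite of areRuntimeChecksProfitable in PATCH 16 is easier
to follow with concrete numbers. The sketch below models the two bounds from
the comments in that hunk; like the surrounding code, it assumes the vector
body is cheaper per element than the scalar loop (ScalarC > VecC / VF):

  #include <algorithm>
  #include <cmath>
  #include <cstdint>

  // Minimum trip count TC for vectorization plus runtime checks to pay off:
  //   RtC + VecC * (TC / VF) < ScalarC * TC   (epilogue cost EpiC assumed 0)
  //   ==> TC > RtC / (ScalarC - VecC / VF)
  // and, additionally, bound the check cost to ~1/10 of the scalar loop:
  //   RtC < ScalarC * TC / 10  ==>  TC > RtC * 10 / ScalarC
  uint64_t minProfitableTripCount(double ScalarC, double VecC, double RtC,
                                  unsigned VF) {
    double MinTC1 = RtC / (ScalarC - VecC / VF);
    double MinTC2 = RtC * 10 / ScalarC;
    return static_cast<uint64_t>(std::ceil(std::max(MinTC1, MinTC2)));
  }

For example, ScalarC = 4, VecC = 12, RtC = 24 and VF = 8 give
MinTC1 = 24 / (4 - 1.5) = 9.6 and MinTC2 = 60, so the trip count must be at
least 60; when a scalar epilogue is allowed, the real code additionally aligns
this up to a multiple of VF (here, 64).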