From 0c337ef857e1b96253e0d18959d66c4640780274 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov@redhat.com>
Date: Mon, 9 Dec 2024 15:59:40 +0100
Subject: [PATCH] [InstCombine] Infer nuw for gep inbounds from base of object

When we have a gep inbounds from the base of an object (e.g. alloca or
global), we know that the index cannot be negative, as this would go out
of bounds. As such, we can infer nuw as well.

The implementation is a bit stricter than necessary; we could also accept
one unknown index followed by known-non-negative indices.

Proof: https://alive2.llvm.org/ce/z/Hp7-6w

(Note that alive2 currently incorrectly doesn't require the inbounds for
the alloca case; see https://github.com/AliveToolkit/alive2/issues/1138.)
---
 clang/test/CodeGen/attr-counted-by.c | 4 +-
 clang/test/CodeGen/union-tbaa1.c | 6 +-
 .../InstCombine/InstructionCombining.cpp | 20 ++
 .../AMDGPU/memcpy-from-constant.ll | 6 +-
 llvm/test/Transforms/InstCombine/cast_phi.ll | 4 +-
 llvm/test/Transforms/InstCombine/load-cmp.ll | 2 +-
 .../InstCombine/memcpy-addrspace.ll | 16 +-
 .../InstCombine/memcpy-from-global.ll | 2 +-
 llvm/test/Transforms/InstCombine/stpcpy-1.ll | 2 +-
 .../Transforms/InstCombine/stpcpy_chk-1.ll | 2 +-
 llvm/test/Transforms/InstCombine/strlen-1.ll | 6 +-
 llvm/test/Transforms/InstCombine/strlen-4.ll | 16 +-
 llvm/test/Transforms/InstCombine/strncat-2.ll | 2 +-
 llvm/test/Transforms/InstCombine/strnlen-3.ll | 18 +-
 llvm/test/Transforms/InstCombine/strnlen-4.ll | 4 +-
 llvm/test/Transforms/InstCombine/strnlen-5.ll | 4 +-
 llvm/test/Transforms/InstCombine/sub-gep.ll | 8 +-
 llvm/test/Transforms/InstCombine/wcslen-1.ll | 6 +-
 llvm/test/Transforms/InstCombine/wcslen-3.ll | 2 +-
 llvm/test/Transforms/InstCombine/wcslen-5.ll | 16 +-
 .../AArch64/sve-interleaved-accesses.ll | 8 +-
 .../LoopVectorize/AArch64/sve2-histcnt.ll | 6 +-
 .../LoopVectorize/X86/small-size.ll | 38 ++--
 .../X86/x86_fp80-vector-store.ll | 4 +-
 .../LoopVectorize/interleaved-accesses.ll | 6 +-
 .../LoopVectorize/multiple-address-spaces.ll | 4 +-
 .../Transforms/LoopVectorize/non-const-n.ll | 6 +-
 .../PhaseOrdering/X86/excessive-unrolling.ll | 174 +++++++++---------
 .../SLPVectorizer/X86/operandorder.ll | 16 +-
 29 files changed, 214 insertions(+), 194 deletions(-)

diff --git a/clang/test/CodeGen/attr-counted-by.c b/clang/test/CodeGen/attr-counted-by.c
index 6b3cad5708835..be4c7f07e9215 100644
--- a/clang/test/CodeGen/attr-counted-by.c
+++ b/clang/test/CodeGen/attr-counted-by.c
@@ -1043,7 +1043,7 @@ int test12_a, test12_b;
 // NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR11:[0-9]+]]
 // NO-SANITIZE-WITH-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]]
 // NO-SANITIZE-WITH-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64
-// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
+// NO-SANITIZE-WITH-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]]
 // NO-SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]]
 // NO-SANITIZE-WITH-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[TBAA2]]
 // NO-SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[TBAA2]]
@@ -1085,7 +1085,7 @@ int test12_a, test12_b;
 //
NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.lifetime.start.p0(i64 24, ptr nonnull [[BAZ]]) #[[ATTR9:[0-9]+]] // NO-SANITIZE-WITHOUT-ATTR-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr noundef nonnull align 4 dereferenceable(24) [[BAZ]], ptr noundef nonnull align 4 dereferenceable(24) @test12_bar, i64 24, i1 false), !tbaa.struct [[TBAA_STRUCT7:![0-9]+]] // NO-SANITIZE-WITHOUT-ATTR-NEXT: [[IDXPROM:%.*]] = sext i32 [[INDEX]] to i64 -// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]] +// NO-SANITIZE-WITHOUT-ATTR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [6 x i32], ptr [[BAZ]], i64 0, i64 [[IDXPROM]] // NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]] // NO-SANITIZE-WITHOUT-ATTR-NEXT: store i32 [[TMP0]], ptr @test12_b, align 4, !tbaa [[TBAA2]] // NO-SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = load i32, ptr getelementptr inbounds nuw (i8, ptr @test12_foo, i64 4), align 4, !tbaa [[TBAA2]] diff --git a/clang/test/CodeGen/union-tbaa1.c b/clang/test/CodeGen/union-tbaa1.c index 0f7a67cb7eccd..7d44c9a3fbe6b 100644 --- a/clang/test/CodeGen/union-tbaa1.c +++ b/clang/test/CodeGen/union-tbaa1.c @@ -16,17 +16,17 @@ void bar(vect32 p[][2]); // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]] // CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !tbaa [[TBAA2]] // CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP1]], [[NUM]] -// CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]] +// CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]] // CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX2]], align 8, !tbaa [[TBAA6:![0-9]+]] // CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [2 x i32], ptr [[ARR]], i32 [[TMP0]], i32 1 // CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4, !tbaa [[TBAA2]] // CHECK-NEXT: [[MUL6:%.*]] = mul i32 [[TMP2]], [[NUM]] -// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]], i32 1 +// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds nuw [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP0]], i32 1 // CHECK-NEXT: store i32 [[MUL6]], ptr [[ARRAYIDX8]], align 4, !tbaa [[TBAA6]] // CHECK-NEXT: [[TMP3:%.*]] = lshr i32 [[MUL]], 16 // CHECK-NEXT: store i32 [[TMP3]], ptr [[VEC]], align 4, !tbaa [[TBAA2]] // CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[INDEX]], align 4, !tbaa [[TBAA2]] -// CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP4]], i32 1 +// CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds nuw [4 x [2 x %union.vect32]], ptr [[TMP]], i32 0, i32 [[TMP4]], i32 1 // CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds nuw i8, ptr [[ARRAYIDX14]], i32 2 // CHECK-NEXT: [[TMP5:%.*]] = load i16, ptr [[ARRAYIDX15]], align 2, !tbaa [[TBAA6]] // CHECK-NEXT: [[CONV16:%.*]] = zext i16 [[TMP5]] to i32 diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index ac9a4fdcf304d..8f55e5b3cc28a 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -3119,6 +3119,26 @@ Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) { } } + // The single (non-zero) index of an 
inbounds GEP of a base object cannot
+  // be negative.
+  auto HasOneNonZeroIndex = [&]() {
+    bool FoundNonZero = false;
+    for (Value *Idx : GEP.indices()) {
+      auto *C = dyn_cast<Constant>(Idx);
+      if (C && C->isNullValue())
+        continue;
+      if (FoundNonZero)
+        return false;
+      FoundNonZero = true;
+    }
+    return true;
+  };
+  if (GEP.isInBounds() && !GEP.hasNoUnsignedWrap() && isBaseOfObject(PtrOp) &&
+      HasOneNonZeroIndex()) {
+    GEP.setNoWrapFlags(GEP.getNoWrapFlags() | GEPNoWrapFlags::noUnsignedWrap());
+    return &GEP;
+  }
+
   // nusw + nneg -> nuw
   if (GEP.hasNoUnsignedSignedWrap() && !GEP.hasNoUnsignedWrap() &&
       all_of(GEP.indices(), [&](Value *Idx) {
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll b/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
index c14d61b51ad77..0e5b4dedd4a0d 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/memcpy-from-constant.ll
@@ -53,7 +53,7 @@ define i64 @memcpy_constant_arg_ptr_to_alloca_load_atomic(ptr addrspace(4) noali
 ; CHECK-LABEL: @memcpy_constant_arg_ptr_to_alloca_load_atomic(
 ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [32 x i64], align 8, addrspace(5)
 ; CHECK-NEXT: call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef align 8 dereferenceable(256) [[ALLOCA]], ptr addrspace(4) noundef align 8 dereferenceable(256) [[ARG:%.*]], i64 256, i1 false)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [32 x i64], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [32 x i64], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
 ; CHECK-NEXT: [[LOAD:%.*]] = load atomic i64, ptr addrspace(5) [[GEP]] syncscope("somescope") acquire, align 8
 ; CHECK-NEXT: ret i64 [[LOAD]]
 ;
@@ -101,7 +101,7 @@ define amdgpu_kernel void @memcpy_constant_byref_arg_ptr_to_alloca_too_many_byte
 ; CHECK-LABEL: @memcpy_constant_byref_arg_ptr_to_alloca_too_many_bytes(
 ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [32 x i8], align 4, addrspace(5)
 ; CHECK-NEXT: call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef align 4 dereferenceable(31) [[ALLOCA]], ptr addrspace(4) noundef align 4 dereferenceable(31) [[ARG:%.*]], i64 31, i1 false)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [32 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [32 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
 ; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(5) [[GEP]], align 1
 ; CHECK-NEXT: store i8 [[LOAD]], ptr addrspace(1) [[OUT:%.*]], align 1
 ; CHECK-NEXT: ret void
@@ -120,7 +120,7 @@ define amdgpu_kernel void @memcpy_constant_intrinsic_ptr_to_alloca(ptr addrspace
 ; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [32 x i8], align 4, addrspace(5)
 ; CHECK-NEXT: [[KERNARG_SEGMENT_PTR:%.*]] = call align 16 dereferenceable(32) ptr addrspace(4) @llvm.amdgcn.kernarg.segment.ptr()
 ; CHECK-NEXT: call void @llvm.memcpy.p5.p4.i64(ptr addrspace(5) noundef align 4 dereferenceable(32) [[ALLOCA]], ptr addrspace(4) noundef align 16 dereferenceable(32) [[KERNARG_SEGMENT_PTR]], i64 32, i1 false)
-; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [32 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [32 x i8], ptr addrspace(5) [[ALLOCA]], i32 0, i32 [[IDX:%.*]]
 ; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(5) [[GEP]], align 1
 ; CHECK-NEXT: store i8 [[LOAD]], ptr addrspace(1) [[OUT:%.*]], align 1
 ; CHECK-NEXT: ret void
diff --git
a/llvm/test/Transforms/InstCombine/cast_phi.ll b/llvm/test/Transforms/InstCombine/cast_phi.ll index aafe5f57c4c72..f289e1459000a 100644 --- a/llvm/test/Transforms/InstCombine/cast_phi.ll +++ b/llvm/test/Transforms/InstCombine/cast_phi.ll @@ -31,8 +31,8 @@ define void @MainKernel(i32 %iNumSteps, i32 %tid, i32 %base) { ; CHECK-NEXT: [[TMP3:%.*]] = icmp ugt i32 [[I12_06]], [[BASE:%.*]] ; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[I12_06]], 1 ; CHECK-NEXT: [[CONV_I9:%.*]] = sext i32 [[ADD]] to i64 -; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLA]], i64 0, i64 [[CONV_I9]] -; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds [258 x float], ptr [[CALLB]], i64 0, i64 [[CONV_I9]] +; CHECK-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds nuw [258 x float], ptr [[CALLA]], i64 0, i64 [[CONV_I9]] +; CHECK-NEXT: [[ARRAYIDX24:%.*]] = getelementptr inbounds nuw [258 x float], ptr [[CALLB]], i64 0, i64 [[CONV_I9]] ; CHECK-NEXT: [[CMP40:%.*]] = icmp ult i32 [[I12_06]], [[BASE]] ; CHECK-NEXT: br i1 [[TMP3]], label [[DOTBB4:%.*]], label [[DOTBB5:%.*]] ; CHECK: .bb4: diff --git a/llvm/test/Transforms/InstCombine/load-cmp.ll b/llvm/test/Transforms/InstCombine/load-cmp.ll index 12be81b8f815d..531258935bb82 100644 --- a/llvm/test/Transforms/InstCombine/load-cmp.ll +++ b/llvm/test/Transforms/InstCombine/load-cmp.ll @@ -339,7 +339,7 @@ define i1 @test10_struct_arr_noinbounds_i64(i64 %x) { define i1 @pr93017(i64 %idx) { ; CHECK-LABEL: @pr93017( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[IDX:%.*]] to i32 -; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds [2 x ptr], ptr @table, i32 0, i32 [[TMP1]] +; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds nuw [2 x ptr], ptr @table, i32 0, i32 [[TMP1]] ; CHECK-NEXT: [[V:%.*]] = load ptr, ptr [[GEP]], align 4 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[V]], null ; CHECK-NEXT: ret i1 [[CMP]] diff --git a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll index d6624010acb21..f931b41eb0f71 100644 --- a/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll +++ b/llvm/test/Transforms/InstCombine/memcpy-addrspace.ll @@ -6,7 +6,7 @@ define void @test_load(ptr addrspace(1) %out, i64 %x) { ; CHECK-LABEL: @test_load( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr addrspace(2) @test.data, i64 0, i64 [[X:%.*]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i32], ptr addrspace(2) @test.data, i64 0, i64 [[X:%.*]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]] ; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4 @@ -45,7 +45,7 @@ entry: define void @test_load_bitcast_chain(ptr addrspace(1) %out, i64 %x) { ; CHECK-LABEL: @test_load_bitcast_chain( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr addrspace(2) @test.data, i64 [[X:%.*]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr addrspace(2) @test.data, i64 [[X:%.*]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr addrspace(2) [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]] ; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4 @@ -66,7 +66,7 @@ define void @test_call(ptr addrspace(1) %out, i64 %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4 ; 
CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false) -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] ; CHECK-NEXT: [[TMP0:%.*]] = call i32 @foo(ptr nonnull [[ARRAYIDX]]) ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]] ; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4 @@ -87,8 +87,8 @@ define void @test_call_no_null_opt(ptr addrspace(1) %out, i64 %x) #0 { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false) -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] -; CHECK-NEXT: [[TMP0:%.*]] = call i32 @foo(ptr [[ARRAYIDX]]) +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @foo(ptr nonnull [[ARRAYIDX]]) ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]] ; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4 ; CHECK-NEXT: ret void @@ -108,7 +108,7 @@ define void @test_load_and_call(ptr addrspace(1) %out, i64 %x, i64 %y) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false) -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]] ; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4 @@ -135,11 +135,11 @@ define void @test_load_and_call_no_null_opt(ptr addrspace(1) %out, i64 %x, i64 % ; CHECK-NEXT: entry: ; CHECK-NEXT: [[DATA:%.*]] = alloca [8 x i32], align 4 ; CHECK-NEXT: call void @llvm.memcpy.p0.p2.i64(ptr noundef nonnull align 4 dereferenceable(32) [[DATA]], ptr addrspace(2) noundef align 4 dereferenceable(32) @test.data, i64 32, i1 false) -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [8 x i32], ptr [[DATA]], i64 0, i64 [[X:%.*]] ; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT:%.*]], i64 [[X]] ; CHECK-NEXT: store i32 [[TMP0]], ptr addrspace(1) [[ARRAYIDX1]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(ptr [[ARRAYIDX]]) +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @foo(ptr nonnull [[ARRAYIDX]]) ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr addrspace(1) [[OUT]], i64 [[Y:%.*]] ; CHECK-NEXT: store i32 [[TMP1]], ptr addrspace(1) [[ARRAYIDX2]], align 4 ; CHECK-NEXT: ret void diff --git 
a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll index 34e6c601f494a..845b1ad703596 100644 --- a/llvm/test/Transforms/InstCombine/memcpy-from-global.ll +++ b/llvm/test/Transforms/InstCombine/memcpy-from-global.ll @@ -322,7 +322,7 @@ define float @test11_volatile(i64 %i) { ; CHECK-NEXT: [[A:%.*]] = alloca [4 x float], align 4 ; CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[A]]) ; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A]], ptr addrspace(1) align 4 @I, i64 16, i1 true) -; CHECK-NEXT: [[G:%.*]] = getelementptr inbounds [4 x float], ptr [[A]], i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[G:%.*]] = getelementptr inbounds nuw [4 x float], ptr [[A]], i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[R:%.*]] = load float, ptr [[G]], align 4 ; CHECK-NEXT: ret float [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/stpcpy-1.ll b/llvm/test/Transforms/InstCombine/stpcpy-1.ll index 2ddacb2097442..88d2ab9bfec42 100644 --- a/llvm/test/Transforms/InstCombine/stpcpy-1.ll +++ b/llvm/test/Transforms/InstCombine/stpcpy-1.ll @@ -25,7 +25,7 @@ define ptr @test_simplify1() { define ptr @test_simplify2() { ; CHECK-LABEL: @test_simplify2( ; CHECK-NEXT: [[STRLEN:%.*]] = call i32 @strlen(ptr noundef nonnull dereferenceable(1) @a) -; CHECK-NEXT: [[RET:%.*]] = getelementptr inbounds i8, ptr @a, i32 [[STRLEN]] +; CHECK-NEXT: [[RET:%.*]] = getelementptr inbounds nuw i8, ptr @a, i32 [[STRLEN]] ; CHECK-NEXT: ret ptr [[RET]] ; %ret = call ptr @stpcpy(ptr @a, ptr @a) diff --git a/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll b/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll index 2d775f35c8bda..8a1ce0dfa9f9b 100644 --- a/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll +++ b/llvm/test/Transforms/InstCombine/stpcpy_chk-1.ll @@ -93,7 +93,7 @@ define ptr @test_simplify5() { define ptr @test_simplify6() { ; CHECK-LABEL: @test_simplify6( ; CHECK-NEXT: [[STRLEN:%.*]] = call i32 @strlen(ptr noundef nonnull dereferenceable(1) @a) -; CHECK-NEXT: [[RET:%.*]] = getelementptr inbounds i8, ptr @a, i32 [[STRLEN]] +; CHECK-NEXT: [[RET:%.*]] = getelementptr inbounds nuw i8, ptr @a, i32 [[STRLEN]] ; CHECK-NEXT: ret ptr [[RET]] ; diff --git a/llvm/test/Transforms/InstCombine/strlen-1.ll b/llvm/test/Transforms/InstCombine/strlen-1.ll index facf4f7d0973f..2d28139bf75a8 100644 --- a/llvm/test/Transforms/InstCombine/strlen-1.ll +++ b/llvm/test/Transforms/InstCombine/strlen-1.ll @@ -155,7 +155,7 @@ define i32 @test_no_simplify1() { define i32 @test_no_simplify2(i32 %x) { ; CHECK-LABEL: @test_no_simplify2( -; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i8], ptr @null_hello, i32 0, i32 [[X:%.*]] +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [7 x i8], ptr @null_hello, i32 0, i32 [[X:%.*]] ; CHECK-NEXT: [[HELLO_L:%.*]] = call i32 @strlen(ptr noundef nonnull dereferenceable(1) [[HELLO_P]]) ; CHECK-NEXT: ret i32 [[HELLO_L]] ; @@ -166,8 +166,8 @@ define i32 @test_no_simplify2(i32 %x) { define i32 @test_no_simplify2_no_null_opt(i32 %x) #0 { ; CHECK-LABEL: @test_no_simplify2_no_null_opt( -; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i8], ptr @null_hello, i32 0, i32 [[X:%.*]] -; CHECK-NEXT: [[HELLO_L:%.*]] = call i32 @strlen(ptr noundef [[HELLO_P]]) +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [7 x i8], ptr @null_hello, i32 0, i32 [[X:%.*]] +; CHECK-NEXT: [[HELLO_L:%.*]] = call i32 @strlen(ptr noundef nonnull dereferenceable(1) [[HELLO_P]]) ; CHECK-NEXT: ret i32 [[HELLO_L]] ; %hello_p = getelementptr 
inbounds [7 x i8], ptr @null_hello, i32 0, i32 %x diff --git a/llvm/test/Transforms/InstCombine/strlen-4.ll b/llvm/test/Transforms/InstCombine/strlen-4.ll index 58d04e8a0d4be..ca01ce93be883 100644 --- a/llvm/test/Transforms/InstCombine/strlen-4.ll +++ b/llvm/test/Transforms/InstCombine/strlen-4.ll @@ -18,7 +18,7 @@ declare i64 @strlen(ptr) define i64 @fold_strlen_s3_pi_s5(i1 %X, i64 %I) { ; CHECK-LABEL: @fold_strlen_s3_pi_s5( -; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[X:%.*]], ptr [[PS3_PI]], ptr @s5 ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -40,7 +40,7 @@ define i64 @fold_strlen_s3_pi_p1_s5(i1 %0, i64 %1) { ; XFAIL-CHECK-NEXT: [[SEL:%.*]] = select i1 %0, i64 [[DIF_I]], i64 5 ; XFAIL-CHECK-NEXT: ret i64 [[SEL]] ; CHECK-LABEL: @fold_strlen_s3_pi_p1_s5( -; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[PS3_PI_P1:%.*]] = getelementptr i8, ptr [[PS3_PI]], i64 1 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS3_PI_P1]], ptr @s5 ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[SEL]]) @@ -61,7 +61,7 @@ define i64 @fold_strlen_s3_pi_p1_s5(i1 %0, i64 %1) { define i64 @call_strlen_s5_3_pi_s5(i1 %0, i64 %1) { ; CHECK-LABEL: @call_strlen_s5_3_pi_s5( -; CHECK-NEXT: [[PS5_3_PI:%.*]] = getelementptr inbounds [10 x i8], ptr @s5_3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS5_3_PI:%.*]] = getelementptr inbounds nuw [10 x i8], ptr @s5_3, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS5_3_PI]], ptr @s5 ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -78,7 +78,7 @@ define i64 @call_strlen_s5_3_pi_s5(i1 %0, i64 %1) { define i64 @call_strlen_s5_3_s5_pj(i1 %X, i64 %J) { ; CHECK-LABEL: @call_strlen_s5_3_s5_pj( -; CHECK-NEXT: [[PS5:%.*]] = getelementptr inbounds [6 x i8], ptr @s5, i64 0, i64 [[J:%.*]] +; CHECK-NEXT: [[PS5:%.*]] = getelementptr inbounds nuw [6 x i8], ptr @s5, i64 0, i64 [[J:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[X:%.*]], ptr @s5_3, ptr [[PS5]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -95,7 +95,7 @@ define i64 @call_strlen_s5_3_s5_pj(i1 %X, i64 %J) { define i64 @fold_strlen_s3_s5_pj(i1 %X, i64 %J) { ; CHECK-LABEL: @fold_strlen_s3_s5_pj( -; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds [6 x i8], ptr @s5, i64 0, i64 [[J:%.*]] +; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds nuw [6 x i8], ptr @s5, i64 0, i64 [[J:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[X:%.*]], ptr @s3, ptr [[PS5_PJ]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -114,7 +114,7 @@ define i64 @fold_strlen_s3_s5_pj(i1 %X, i64 %J) { define i64 @call_strlen_s3_s5_3_pj(i1 %0, i64 %1) { ; CHECK-LABEL: @call_strlen_s3_s5_3_pj( -; CHECK-NEXT: [[PS5_3_PJ:%.*]] = getelementptr inbounds [10 x i8], ptr @s5_3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS5_3_PJ:%.*]] = getelementptr inbounds nuw [10 x i8], ptr @s5_3, i64 0, i64 [[TMP1:%.*]] ; 
CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr @s3, ptr [[PS5_3_PJ]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -131,8 +131,8 @@ define i64 @call_strlen_s3_s5_3_pj(i1 %0, i64 %1) { define i64 @fold_strlen_s3_pi_s5_pj(i1 %X, i64 %I, i64 %J) { ; CHECK-LABEL: @fold_strlen_s3_pi_s5_pj( -; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] -; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds [6 x i8], ptr @s5, i64 0, i64 [[J:%.*]] +; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds nuw [6 x i8], ptr @s5, i64 0, i64 [[J:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[X:%.*]], ptr [[PS3_PI]], ptr [[PS5_PJ]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @strlen(ptr noundef nonnull dereferenceable(1) [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] diff --git a/llvm/test/Transforms/InstCombine/strncat-2.ll b/llvm/test/Transforms/InstCombine/strncat-2.ll index 7f1199d908d39..bc857c5b31d87 100644 --- a/llvm/test/Transforms/InstCombine/strncat-2.ll +++ b/llvm/test/Transforms/InstCombine/strncat-2.ll @@ -13,7 +13,7 @@ declare ptr @strncat(ptr, ptr, i32) define void @test_simplify1() { ; CHECK-LABEL: @test_simplify1( ; CHECK-NEXT: [[STRLEN:%.*]] = call i32 @strlen(ptr noundef nonnull dereferenceable(1) @a) -; CHECK-NEXT: [[ENDPTR:%.*]] = getelementptr inbounds i8, ptr @a, i32 [[STRLEN]] +; CHECK-NEXT: [[ENDPTR:%.*]] = getelementptr inbounds nuw i8, ptr @a, i32 [[STRLEN]] ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(6) [[ENDPTR]], ptr noundef nonnull align 1 dereferenceable(6) @hello, i32 6, i1 false) ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/InstCombine/strnlen-3.ll b/llvm/test/Transforms/InstCombine/strnlen-3.ll index f988f86fc15b3..3b7e44f8df889 100644 --- a/llvm/test/Transforms/InstCombine/strnlen-3.ll +++ b/llvm/test/Transforms/InstCombine/strnlen-3.ll @@ -31,7 +31,7 @@ define i64 @fold_strnlen_sx_pi_0(i64 %i) { define i64 @call_strnlen_sx_pi_n(i64 %i, i64 %n) { ; CHECK-LABEL: @call_strnlen_sx_pi_n( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [0 x i8], ptr @sx, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [0 x i8], ptr @sx, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[PTR]], i64 [[N:%.*]]) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -46,7 +46,7 @@ define i64 @call_strnlen_sx_pi_n(i64 %i, i64 %n) { define i64 @call_strnlen_a3_pi_2(i64 %i) { ; CHECK-LABEL: @call_strnlen_a3_pi_2( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 2) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -61,7 +61,7 @@ define i64 @call_strnlen_a3_pi_2(i64 %i) { define i64 @call_strnlen_a3_pi_3(i64 %i) { ; CHECK-LABEL: @call_strnlen_a3_pi_3( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 3) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -111,7 +111,7 @@ define i64 @fold_strnlen_s5_3_pi_0(i64 zeroext %i) { define i64 
@call_strnlen_s5_3_pi_n(i64 zeroext %i, i64 %n) { ; CHECK-LABEL: @call_strnlen_s5_3_pi_n( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [10 x i8], ptr @s5_3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [10 x i8], ptr @s5_3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[PTR]], i64 [[N:%.*]]) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -151,7 +151,7 @@ define i64 @fold_strnlen_s3_n(i64 %n) { define i64 @fold_strnlen_a3_pi_2(i64 %i) { ; CHECK-LABEL: @fold_strnlen_a3_pi_2( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [3 x i8], ptr @a3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 2) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -166,7 +166,7 @@ define i64 @fold_strnlen_a3_pi_2(i64 %i) { define i64 @fold_strnlen_s3_pi_2(i64 %i) { ; CHECK-LABEL: @fold_strnlen_s3_pi_2( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 2) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -181,7 +181,7 @@ define i64 @fold_strnlen_s3_pi_2(i64 %i) { define i64 @fold_strnlen_s3_pi_3(i64 %i) { ; CHECK-LABEL: @fold_strnlen_s3_pi_3( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 3) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -196,7 +196,7 @@ define i64 @fold_strnlen_s3_pi_3(i64 %i) { define i64 @fold_strnlen_s3_pi_n(i64 %i, i64 %n) { ; CHECK-LABEL: @fold_strnlen_s3_pi_n( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[PTR]], i64 [[N:%.*]]) ; CHECK-NEXT: ret i64 [[LEN]] ; @@ -212,7 +212,7 @@ define i64 @fold_strnlen_s3_pi_n(i64 %i, i64 %n) { define i64 @call_strnlen_s5_3_pi_2(i64 %i) { ; CHECK-LABEL: @call_strnlen_s5_3_pi_2( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [10 x i8], ptr @s5_3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [10 x i8], ptr @s5_3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr noundef nonnull dereferenceable(1) [[PTR]], i64 2) ; CHECK-NEXT: ret i64 [[LEN]] ; diff --git a/llvm/test/Transforms/InstCombine/strnlen-4.ll b/llvm/test/Transforms/InstCombine/strnlen-4.ll index 7a222f0993be4..6f73ee47d7b50 100644 --- a/llvm/test/Transforms/InstCombine/strnlen-4.ll +++ b/llvm/test/Transforms/InstCombine/strnlen-4.ll @@ -17,7 +17,7 @@ declare i64 @strnlen(ptr, i64) define i64 @fold_strnlen_s3_pi_s5_n(i1 %C, i64 %i, i64 %n) { ; CHECK-LABEL: @fold_strnlen_s3_pi_s5_n( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], ptr [[PTR]], ptr @s5 ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[SEL]], i64 [[N:%.*]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -57,7 +57,7 @@ define i64 
@call_strnlen_s3_pi_xbounds_s5_n(i1 %C, i64 %i, i64 %n) { define i64 @call_strnlen_s3_pi_sx_n(i1 %C, i64 %i, i64 %n) { ; CHECK-LABEL: @call_strnlen_s3_pi_sx_n( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [4 x i8], ptr @s3, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], ptr [[PTR]], ptr @sx ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[SEL]], i64 [[N:%.*]]) ; CHECK-NEXT: ret i64 [[LEN]] diff --git a/llvm/test/Transforms/InstCombine/strnlen-5.ll b/llvm/test/Transforms/InstCombine/strnlen-5.ll index 3866b92ab8b4a..3eb3764955fa5 100644 --- a/llvm/test/Transforms/InstCombine/strnlen-5.ll +++ b/llvm/test/Transforms/InstCombine/strnlen-5.ll @@ -164,7 +164,7 @@ define i1 @fold_strnlen_ax_nz_gtz(i64 %n) { define i1 @fold_strnlen_a5_pi_nz_eqz(i64 %i, i64 %n) { ; CHECK-LABEL: @fold_strnlen_a5_pi_nz_eqz( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [5 x i8], ptr @a5, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [5 x i8], ptr @a5, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[CHAR0:%.*]] = load i8, ptr [[PTR]], align 1 ; CHECK-NEXT: [[EQZ:%.*]] = icmp eq i8 [[CHAR0]], 0 ; CHECK-NEXT: ret i1 [[EQZ]] @@ -200,7 +200,7 @@ define i1 @fold_strnlen_s5_pi_nz_eqz(i64 %i, i64 %n) { define i1 @call_strnlen_s5_pi_n_eqz(i64 %i, i64 %n) { ; CHECK-LABEL: @call_strnlen_s5_pi_n_eqz( -; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds [6 x i8], ptr @s5, i64 0, i64 [[I:%.*]] +; CHECK-NEXT: [[PTR:%.*]] = getelementptr inbounds nuw [6 x i8], ptr @s5, i64 0, i64 [[I:%.*]] ; CHECK-NEXT: [[LEN:%.*]] = call i64 @strnlen(ptr nonnull [[PTR]], i64 [[N:%.*]]) ; CHECK-NEXT: [[EQZ:%.*]] = icmp eq i64 [[LEN]], 0 ; CHECK-NEXT: ret i1 [[EQZ]] diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll index 3f8728d3a4381..39d502b23ee31 100644 --- a/llvm/test/Transforms/InstCombine/sub-gep.ll +++ b/llvm/test/Transforms/InstCombine/sub-gep.ll @@ -305,7 +305,7 @@ define i16 @test24a_as1(ptr addrspace(1) %P, i16 %A) { define i64 @test24b(ptr %P, i64 %A){ ; CHECK-LABEL: @test24b( -; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i64 [[A:%.*]], 1 +; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw nsw i64 [[A:%.*]], 1 ; CHECK-NEXT: ret i64 [[B_IDX]] ; %B = getelementptr inbounds [42 x i16], ptr @Arr, i64 0, i64 %A @@ -316,7 +316,7 @@ define i64 @test24b(ptr %P, i64 %A){ define i64 @test25(ptr %P, i64 %A){ ; CHECK-LABEL: @test25( -; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i64 [[A:%.*]], 1 +; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw nsw i64 [[A:%.*]], 1 ; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i64 [[B_IDX]], -84 ; CHECK-NEXT: ret i64 [[GEPDIFF]] ; @@ -395,7 +395,7 @@ define i64 @negative_ptrtoint_sub_zext_ptrtoint(ptr %p, i32 %offset) { define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) { ; CHECK-LABEL: @test25_as1( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i16 -; CHECK-NEXT: [[B_IDX:%.*]] = shl nsw i16 [[TMP1]], 1 +; CHECK-NEXT: [[B_IDX:%.*]] = shl nuw nsw i16 [[TMP1]], 1 ; CHECK-NEXT: [[GEPDIFF:%.*]] = add nsw i16 [[B_IDX]], -84 ; CHECK-NEXT: ret i16 [[GEPDIFF]] ; @@ -409,7 +409,7 @@ define i16 @test25_as1(ptr addrspace(1) %P, i64 %A) { define i64 @ptrtoint_sub_zext_ptrtoint_as2_inbounds(i32 %offset) { ; CHECK-LABEL: @ptrtoint_sub_zext_ptrtoint_as2_inbounds( -; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds bfloat, ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]] +; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds nuw bfloat, ptr addrspace(2) @Arr_as2, i32 
[[OFFSET:%.*]] ; CHECK-NEXT: [[B:%.*]] = ptrtoint ptr addrspace(2) [[A]] to i32 ; CHECK-NEXT: [[C:%.*]] = zext i32 [[B]] to i64 ; CHECK-NEXT: [[D:%.*]] = sub nsw i64 ptrtoint (ptr addrspace(2) @Arr_as2 to i64), [[C]] diff --git a/llvm/test/Transforms/InstCombine/wcslen-1.ll b/llvm/test/Transforms/InstCombine/wcslen-1.ll index 68ab3470f6768..e2873a9a48593 100644 --- a/llvm/test/Transforms/InstCombine/wcslen-1.ll +++ b/llvm/test/Transforms/InstCombine/wcslen-1.ll @@ -149,7 +149,7 @@ define i64 @test_no_simplify1() { define i64 @test_no_simplify2(i32 %x) { ; CHECK-LABEL: @test_no_simplify2( ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64 -; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i32], ptr @null_hello, i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [7 x i32], ptr @null_hello, i64 0, i64 [[TMP1]] ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]]) ; CHECK-NEXT: ret i64 [[HELLO_L]] ; @@ -161,8 +161,8 @@ define i64 @test_no_simplify2(i32 %x) { define i64 @test_no_simplify2_no_null_opt(i32 %x) #0 { ; CHECK-LABEL: @test_no_simplify2_no_null_opt( ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64 -; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i32], ptr @null_hello, i64 0, i64 [[TMP1]] -; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr [[HELLO_P]]) +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [7 x i32], ptr @null_hello, i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]]) ; CHECK-NEXT: ret i64 [[HELLO_L]] ; %hello_p = getelementptr inbounds [7 x i32], ptr @null_hello, i32 0, i32 %x diff --git a/llvm/test/Transforms/InstCombine/wcslen-3.ll b/llvm/test/Transforms/InstCombine/wcslen-3.ll index 39516de0a0800..65b5adadb805d 100644 --- a/llvm/test/Transforms/InstCombine/wcslen-3.ll +++ b/llvm/test/Transforms/InstCombine/wcslen-3.ll @@ -150,7 +150,7 @@ define i64 @test_no_simplify1() { define i64 @test_no_simplify2(i16 %x) { ; CHECK-LABEL: @test_no_simplify2( ; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i64 -; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i16], ptr @null_hello, i64 0, i64 [[TMP1]] +; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds nuw [7 x i16], ptr @null_hello, i64 0, i64 [[TMP1]] ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(ptr nonnull [[HELLO_P]]) ; CHECK-NEXT: ret i64 [[HELLO_L]] ; diff --git a/llvm/test/Transforms/InstCombine/wcslen-5.ll b/llvm/test/Transforms/InstCombine/wcslen-5.ll index 33c6075a602a3..8cb37be89b4fd 100644 --- a/llvm/test/Transforms/InstCombine/wcslen-5.ll +++ b/llvm/test/Transforms/InstCombine/wcslen-5.ll @@ -19,7 +19,7 @@ declare i64 @wcslen(ptr) define dso_local i64 @fold_wcslen_s3_pi_s5(i1 zeroext %0, i64 %1) { ; CHECK-LABEL: @fold_wcslen_s3_pi_s5( -; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i32], ptr @ws3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds nuw [4 x i32], ptr @ws3, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS3_PI]], ptr @ws5 ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -41,7 +41,7 @@ define dso_local i64 @fold_wcslen_s3_pi_p1_s5(i1 zeroext %0, i64 %1) { ; XFAIL-CHECK-NEXT: [[SEL:%.*]] = select i1 %0, i64 [[DIF_I]], i64 5 ; XFAIL-CHECK-NEXT: ret i64 [[SEL]] ; CHECK-LABEL: @fold_wcslen_s3_pi_p1_s5( -; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i32], ptr @ws3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS3_PI:%.*]] 
= getelementptr inbounds nuw [4 x i32], ptr @ws3, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[PS3_PI_P1:%.*]] = getelementptr inbounds nuw i8, ptr [[PS3_PI]], i64 4 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS3_PI_P1]], ptr @ws5 ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]]) @@ -62,7 +62,7 @@ define dso_local i64 @fold_wcslen_s3_pi_p1_s5(i1 zeroext %0, i64 %1) { define dso_local i64 @call_wcslen_s5_3_pi_s5(i1 zeroext %0, i64 %1) { ; CHECK-LABEL: @call_wcslen_s5_3_pi_s5( -; CHECK-NEXT: [[PS5_3_PI:%.*]] = getelementptr inbounds [10 x i32], ptr @ws5_3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS5_3_PI:%.*]] = getelementptr inbounds nuw [10 x i32], ptr @ws5_3, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS5_3_PI]], ptr @ws5 ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -79,7 +79,7 @@ define dso_local i64 @call_wcslen_s5_3_pi_s5(i1 zeroext %0, i64 %1) { define dso_local i64 @call_wcslen_s5_3_s5_pj(i1 zeroext %0, i64 %1) { ; CHECK-LABEL: @call_wcslen_s5_3_s5_pj( -; CHECK-NEXT: [[PS5:%.*]] = getelementptr inbounds [6 x i32], ptr @ws5, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS5:%.*]] = getelementptr inbounds nuw [6 x i32], ptr @ws5, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr @ws5_3, ptr [[PS5]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -96,7 +96,7 @@ define dso_local i64 @call_wcslen_s5_3_s5_pj(i1 zeroext %0, i64 %1) { define dso_local i64 @fold_wcslen_s3_s5_pj(i1 zeroext %0, i64 %1) { ; CHECK-LABEL: @fold_wcslen_s3_s5_pj( -; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds [6 x i32], ptr @ws5, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds nuw [6 x i32], ptr @ws5, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr @ws3, ptr [[PS5_PJ]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -115,7 +115,7 @@ define dso_local i64 @fold_wcslen_s3_s5_pj(i1 zeroext %0, i64 %1) { define dso_local i64 @call_wcslen_s3_s5_3_pj(i1 zeroext %0, i64 %1) { ; CHECK-LABEL: @call_wcslen_s3_s5_3_pj( -; CHECK-NEXT: [[PS5_3_PJ:%.*]] = getelementptr inbounds [10 x i32], ptr @ws5_3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS5_3_PJ:%.*]] = getelementptr inbounds nuw [10 x i32], ptr @ws5_3, i64 0, i64 [[TMP1:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr @ws3, ptr [[PS5_3_PJ]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] @@ -132,8 +132,8 @@ define dso_local i64 @call_wcslen_s3_s5_3_pj(i1 zeroext %0, i64 %1) { define dso_local i64 @fold_wcslen_s3_pi_s5_pj(i1 zeroext %0, i64 %1, i64 %2) { ; CHECK-LABEL: @fold_wcslen_s3_pi_s5_pj( -; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds [4 x i32], ptr @ws3, i64 0, i64 [[TMP1:%.*]] -; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds [6 x i32], ptr @ws5, i64 0, i64 [[TMP2:%.*]] +; CHECK-NEXT: [[PS3_PI:%.*]] = getelementptr inbounds nuw [4 x i32], ptr @ws3, i64 0, i64 [[TMP1:%.*]] +; CHECK-NEXT: [[PS5_PJ:%.*]] = getelementptr inbounds nuw [6 x i32], ptr @ws5, i64 0, i64 [[TMP2:%.*]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[TMP0:%.*]], ptr [[PS3_PI]], ptr [[PS5_PJ]] ; CHECK-NEXT: [[LEN:%.*]] = tail call i64 @wcslen(ptr nonnull [[SEL]]) ; CHECK-NEXT: ret i64 [[LEN]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll 
b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 1e310ca5fcaed..28c9c5398a7a0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -36,14 +36,14 @@ define void @test_array_load2_store2(i32 %C, i32 %D) #1 {
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP2]], align 4
 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
 ; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
 ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 1
 ; CHECK-NEXT: [[TMP6:%.*]] = add nsw <vscale x 4 x i32> [[TMP3]], [[BROADCAST_SPLAT]]
 ; CHECK-NEXT: [[TMP7:%.*]] = mul nsw <vscale x 4 x i32> [[TMP4]], [[BROADCAST_SPLAT2]]
-; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP6]], <vscale x 4 x i32> [[TMP7]])
 ; CHECK-NEXT: store <vscale x 8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP8]], align 4
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP1]]
@@ -127,7 +127,7 @@ define void @test_array_load2_i16_store2(i32 %C, i32 %D) #1 {
 ; CHECK-NEXT: [[WIDE_MASKED_GATHER1:%.*]] = call <vscale x 4 x i16> @llvm.masked.gather.nxv4i16.nxv4p0(<vscale x 4 x ptr> [[TMP8]], i32 2, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i16> poison)
 ; CHECK-NEXT: [[TMP9:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER]] to <vscale x 4 x i32>
 ; CHECK-NEXT: [[TMP10:%.*]] = add nsw <vscale x 4 x i32> [[BROADCAST_SPLAT]], [[TMP9]]
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i16> [[WIDE_MASKED_GATHER1]] to <vscale x 4 x i32>
 ; CHECK-NEXT: [[TMP12:%.*]] = mul nsw <vscale x 4 x i32> [[BROADCAST_SPLAT3]], [[TMP11]]
 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = call <vscale x 8 x i32> @llvm.vector.interleave2.nxv8i32(<vscale x 4 x i32> [[TMP10]], <vscale x 4 x i32> [[TMP12]])
@@ -209,7 +209,7 @@ define void @test_array_load2_store2_i16(i32 noundef %C, i32 noundef %D) #1 {
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[TMP3]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP6]], align 4
 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
 ; CHECK-NEXT: [[TMP7:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index 3b00312959d8a..728d7d1995e08 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -557,7 +557,7 @@ define void @histogram_array_3op_gep(i64 noundef %N) #0 {
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1048576 x i32], ptr @idx_array, i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [1048576 x i32], ptr @idx_array, i64 0, i64 [[INDEX]]
 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP5]], align 4
 ; CHECK-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i32> [[WIDE_LOAD1]] to <vscale x 4 x i64>
 ; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1048576 x i32], ptr @data_array, i64 0, <vscale x 4 x i64> [[TMP14]]
@@ -573,10 +573,10 @@
 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
 ; CHECK: for.body:
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1048576 x i32], ptr @idx_array, i64 0, i64 [[IV]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [1048576 x i32], ptr @idx_array, i64 0, i64 [[IV]]
 ; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
 ; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP9]] to i64
-; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [1048576 x i32], ptr @data_array, i64 0, i64 [[IDXPROM5]]
+; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw [1048576 x i32], ptr @data_array, i64 0, i64 [[IDXPROM5]]
 ; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
 ; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
 ; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX6]], align 4
diff --git a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
index 55ff26c55b512..4e98a4fc78805 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/small-size.ll
@@ -28,12 +28,12 @@ define void @example1() optsize {
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @c, i64 0, i64 [[INDEX]]
 ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4
 ; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @a, i64 0, i64 [[INDEX]]
 ; CHECK-NEXT: store <4 x i32> [[TMP3]], ptr [[TMP4]], align 4
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
@@ -89,7 +89,7 @@ define void @example2(i32 %n, i32 %x) optsize {
 ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i64 0
 ; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
 ; CHECK: pred.store.if:
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[INDEX]]
 ;
CHECK-NEXT: store i32 [[X:%.*]], ptr [[TMP5]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]] ; CHECK: pred.store.continue: @@ -97,7 +97,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]] ; CHECK: pred.store.if1: ; CHECK-NEXT: [[TMP7:%.*]] = or disjoint i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP7]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[TMP7]] ; CHECK-NEXT: store i32 [[X]], ptr [[TMP8]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]] ; CHECK: pred.store.continue2: @@ -105,7 +105,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]] ; CHECK: pred.store.if3: ; CHECK-NEXT: [[TMP10:%.*]] = or disjoint i64 [[INDEX]], 2 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP10]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[TMP10]] ; CHECK-NEXT: store i32 [[X]], ptr [[TMP11]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]] ; CHECK: pred.store.continue4: @@ -113,7 +113,7 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]] ; CHECK: pred.store.if5: ; CHECK-NEXT: [[TMP13:%.*]] = or disjoint i64 [[INDEX]], 3 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP13]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[TMP13]] ; CHECK-NEXT: store i32 [[X]], ptr [[TMP14]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]] ; CHECK: pred.store.continue6: @@ -152,11 +152,11 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP18]], i64 0 ; CHECK-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF21:%.*]], label [[PRED_STORE_CONTINUE22:%.*]] ; CHECK: pred.store.if21: -; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @c, i64 0, i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[OFFSET_IDX]] +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @a, i64 0, i64 [[OFFSET_IDX]] ; CHECK-NEXT: [[TMP25:%.*]] = and i32 [[TMP23]], [[TMP21]] ; CHECK-NEXT: store i32 [[TMP25]], ptr [[TMP24]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE22]] @@ -165,11 +165,11 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_STORE_IF23:%.*]], label [[PRED_STORE_CONTINUE24:%.*]] ; CHECK: pred.store.if23: ; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 1 -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP27]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[TMP27]] ; CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4 -; 
CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[TMP27]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @c, i64 0, i64 [[TMP27]] ; CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP30]], align 4 -; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[TMP27]] +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @a, i64 0, i64 [[TMP27]] ; CHECK-NEXT: [[TMP33:%.*]] = and i32 [[TMP31]], [[TMP29]] ; CHECK-NEXT: store i32 [[TMP33]], ptr [[TMP32]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE24]] @@ -178,11 +178,11 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP34]], label [[PRED_STORE_IF25:%.*]], label [[PRED_STORE_CONTINUE26:%.*]] ; CHECK: pred.store.if25: ; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 2 -; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP35]] +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[TMP35]] ; CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP36]], align 4 -; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[TMP35]] +; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @c, i64 0, i64 [[TMP35]] ; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4 -; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[TMP35]] +; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @a, i64 0, i64 [[TMP35]] ; CHECK-NEXT: [[TMP41:%.*]] = and i32 [[TMP39]], [[TMP37]] ; CHECK-NEXT: store i32 [[TMP41]], ptr [[TMP40]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE26]] @@ -191,11 +191,11 @@ define void @example2(i32 %n, i32 %x) optsize { ; CHECK-NEXT: br i1 [[TMP42]], label [[PRED_STORE_IF27:%.*]], label [[PRED_STORE_CONTINUE28]] ; CHECK: pred.store.if27: ; CHECK-NEXT: [[TMP43:%.*]] = add i64 [[OFFSET_IDX]], 3 -; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[TMP43]] +; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[TMP43]] ; CHECK-NEXT: [[TMP45:%.*]] = load i32, ptr [[TMP44]], align 4 -; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[TMP43]] +; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @c, i64 0, i64 [[TMP43]] ; CHECK-NEXT: [[TMP47:%.*]] = load i32, ptr [[TMP46]], align 4 -; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[TMP43]] +; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @a, i64 0, i64 [[TMP43]] ; CHECK-NEXT: [[TMP49:%.*]] = and i32 [[TMP47]], [[TMP45]] ; CHECK-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE28]] diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll b/llvm/test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll index 921cf4246f725..050a866d8ca33 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/x86_fp80-vector-store.ll @@ -14,8 +14,8 @@ define void @example() nounwind ssp uwtable { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i64 [[INDEX]], 1 -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1024 x x86_fp80], ptr @x, i64 0, i64 
[[INDEX]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1024 x x86_fp80], ptr @x, i64 0, i64 [[TMP0]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw [1024 x x86_fp80], ptr @x, i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [1024 x x86_fp80], ptr @x, i64 0, i64 [[TMP0]]
 ; CHECK-NEXT: store x86_fp80 0xK3FFF8000000000000000, ptr [[TMP1]], align 16
 ; CHECK-NEXT: store x86_fp80 0xK3FFF8000000000000000, ptr [[TMP2]], align 16
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 16ba3b26e68ae..e3f9126f22607 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -34,13 +34,13 @@ define void @test_array_load2_store2(i32 %C, i32 %D) {
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [1024 x i32], ptr @AB, i64 0, i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[TMP0]], align 4
 ; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
 ; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
 ; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
 ; CHECK-NEXT: [[TMP3:%.*]] = mul nsw <4 x i32> [[STRIDED_VEC1]], [[BROADCAST_SPLAT3]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw [1024 x i32], ptr @CD, i64 0, i64 [[OFFSET_IDX]]
 ; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
 ; CHECK-NEXT: store <8 x i32> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 4
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
@@ -113,7 +113,7 @@ define void @test_struct_array_load3_store3() {
 ; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
 ; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <12 x i32> [[WIDE_VEC]], <12 x i32> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
 ; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], splat (i32 1)
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1024 x %struct.ST3], ptr @S, i64 0, i64 [[INDEX]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [1024 x %struct.ST3], ptr @S, i64 0, i64 [[INDEX]], i32 0
 ; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[STRIDED_VEC2]], splat (i32 2)
 ; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC3]], splat (i32 3)
 ; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
index ca0edb3e1a46d..bc916372f8e01 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
@@ -24,10 +24,10 @@ define i32 @main() #0 {
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-;
CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [40000 x i8], ptr addrspace(1) @Y, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [40000 x i8], ptr addrspace(1) @Y, i64 0, i64 [[INDEX]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr addrspace(1) [[TMP0]], align 1 ; CHECK-NEXT: [[TMP1:%.*]] = add <4 x i8> [[WIDE_LOAD]], splat (i8 1) -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [40000 x i8], ptr @X, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [40000 x i8], ptr @X, i64 0, i64 [[INDEX]] ; CHECK-NEXT: store <4 x i8> [[TMP1]], ptr [[TMP2]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 40000 diff --git a/llvm/test/Transforms/LoopVectorize/non-const-n.ll b/llvm/test/Transforms/LoopVectorize/non-const-n.ll index 295bf9111329c..60077710b01b3 100644 --- a/llvm/test/Transforms/LoopVectorize/non-const-n.ll +++ b/llvm/test/Transforms/LoopVectorize/non-const-n.ll @@ -19,12 +19,12 @@ define void @example1(i32 %n) nounwind uwtable ssp { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2048 x i32], ptr @b, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @b, i64 0, i64 [[INDEX]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [2048 x i32], ptr @c, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @c, i64 0, i64 [[INDEX]] ; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 ; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[WIDE_LOAD]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2048 x i32], ptr @a, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw [2048 x i32], ptr @a, i64 0, i64 [[INDEX]] ; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP5]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX]], [[TMP1]] diff --git a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll index ad869268f7e65..57a3d8175ba51 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll @@ -20,118 +20,118 @@ define void @test_known_trip_count() { ; CHECK-NEXT: [[TMP1:%.*]] = fadd <2 x double> [[WIDE_LOAD3]], [[WIDE_LOAD5]] ; CHECK-NEXT: store <2 x double> [[TMP0]], ptr @a, align 16 ; CHECK-NEXT: store <2 x double> [[TMP1]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 16), align 16 -; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 32), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 48), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 32), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_1:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 48), align 16 +; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 32), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_1:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 48), 
align 16 +; CHECK-NEXT: [[WIDE_LOAD4_1:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 32), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_1:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 48), align 16 ; CHECK-NEXT: [[TMP2:%.*]] = fadd <2 x double> [[WIDE_LOAD_1]], [[WIDE_LOAD4_1]] ; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[WIDE_LOAD3_1]], [[WIDE_LOAD5_1]] -; CHECK-NEXT: store <2 x double> [[TMP2]], ptr getelementptr inbounds (i8, ptr @a, i64 32), align 16 -; CHECK-NEXT: store <2 x double> [[TMP3]], ptr getelementptr inbounds (i8, ptr @a, i64 48), align 16 -; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 64), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 80), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 64), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_2:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 80), align 16 +; CHECK-NEXT: store <2 x double> [[TMP2]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 32), align 16 +; CHECK-NEXT: store <2 x double> [[TMP3]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 48), align 16 +; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 64), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_2:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 80), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_2:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 64), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_2:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 80), align 16 ; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[WIDE_LOAD_2]], [[WIDE_LOAD4_2]] ; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[WIDE_LOAD3_2]], [[WIDE_LOAD5_2]] -; CHECK-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds (i8, ptr @a, i64 64), align 16 -; CHECK-NEXT: store <2 x double> [[TMP5]], ptr getelementptr inbounds (i8, ptr @a, i64 80), align 16 -; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 96), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 112), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 96), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_3:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 112), align 16 +; CHECK-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 64), align 16 +; CHECK-NEXT: store <2 x double> [[TMP5]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 80), align 16 +; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 96), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_3:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 112), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_3:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 96), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_3:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 112), align 16 ; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[WIDE_LOAD_3]], [[WIDE_LOAD4_3]] ; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[WIDE_LOAD3_3]], [[WIDE_LOAD5_3]] -; CHECK-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds (i8, ptr @a, i64 96), align 16 -; 
CHECK-NEXT: store <2 x double> [[TMP7]], ptr getelementptr inbounds (i8, ptr @a, i64 112), align 16 -; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 128), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 144), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 128), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_4:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 144), align 16 +; CHECK-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 96), align 16 +; CHECK-NEXT: store <2 x double> [[TMP7]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 112), align 16 +; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 128), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_4:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 144), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_4:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 128), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_4:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 144), align 16 ; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x double> [[WIDE_LOAD_4]], [[WIDE_LOAD4_4]] ; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x double> [[WIDE_LOAD3_4]], [[WIDE_LOAD5_4]] -; CHECK-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds (i8, ptr @a, i64 128), align 16 -; CHECK-NEXT: store <2 x double> [[TMP9]], ptr getelementptr inbounds (i8, ptr @a, i64 144), align 16 -; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 160), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 176), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 160), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_5:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 176), align 16 +; CHECK-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 128), align 16 +; CHECK-NEXT: store <2 x double> [[TMP9]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 144), align 16 +; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 160), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_5:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 176), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_5:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 160), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_5:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 176), align 16 ; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x double> [[WIDE_LOAD_5]], [[WIDE_LOAD4_5]] ; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[WIDE_LOAD3_5]], [[WIDE_LOAD5_5]] -; CHECK-NEXT: store <2 x double> [[TMP10]], ptr getelementptr inbounds (i8, ptr @a, i64 160), align 16 -; CHECK-NEXT: store <2 x double> [[TMP11]], ptr getelementptr inbounds (i8, ptr @a, i64 176), align 16 -; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 192), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_6:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 208), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_6:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 192), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_6:%.*]] = load <2 x 
double>, ptr getelementptr inbounds (i8, ptr @c, i64 208), align 16 +; CHECK-NEXT: store <2 x double> [[TMP10]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 160), align 16 +; CHECK-NEXT: store <2 x double> [[TMP11]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 176), align 16 +; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 192), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_6:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 208), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_6:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 192), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_6:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 208), align 16 ; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x double> [[WIDE_LOAD_6]], [[WIDE_LOAD4_6]] ; CHECK-NEXT: [[TMP13:%.*]] = fadd <2 x double> [[WIDE_LOAD3_6]], [[WIDE_LOAD5_6]] -; CHECK-NEXT: store <2 x double> [[TMP12]], ptr getelementptr inbounds (i8, ptr @a, i64 192), align 16 -; CHECK-NEXT: store <2 x double> [[TMP13]], ptr getelementptr inbounds (i8, ptr @a, i64 208), align 16 -; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 224), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 240), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 224), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_7:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 240), align 16 +; CHECK-NEXT: store <2 x double> [[TMP12]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 192), align 16 +; CHECK-NEXT: store <2 x double> [[TMP13]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 208), align 16 +; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 224), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_7:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 240), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_7:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 224), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_7:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 240), align 16 ; CHECK-NEXT: [[TMP14:%.*]] = fadd <2 x double> [[WIDE_LOAD_7]], [[WIDE_LOAD4_7]] ; CHECK-NEXT: [[TMP15:%.*]] = fadd <2 x double> [[WIDE_LOAD3_7]], [[WIDE_LOAD5_7]] -; CHECK-NEXT: store <2 x double> [[TMP14]], ptr getelementptr inbounds (i8, ptr @a, i64 224), align 16 -; CHECK-NEXT: store <2 x double> [[TMP15]], ptr getelementptr inbounds (i8, ptr @a, i64 240), align 16 -; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 256), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 272), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 256), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_8:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 272), align 16 +; CHECK-NEXT: store <2 x double> [[TMP14]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 224), align 16 +; CHECK-NEXT: store <2 x double> [[TMP15]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 240), align 16 +; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 256), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_8:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw 
(i8, ptr @b, i64 272), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_8:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 256), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_8:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 272), align 16 ; CHECK-NEXT: [[TMP16:%.*]] = fadd <2 x double> [[WIDE_LOAD_8]], [[WIDE_LOAD4_8]] ; CHECK-NEXT: [[TMP17:%.*]] = fadd <2 x double> [[WIDE_LOAD3_8]], [[WIDE_LOAD5_8]] -; CHECK-NEXT: store <2 x double> [[TMP16]], ptr getelementptr inbounds (i8, ptr @a, i64 256), align 16 -; CHECK-NEXT: store <2 x double> [[TMP17]], ptr getelementptr inbounds (i8, ptr @a, i64 272), align 16 -; CHECK-NEXT: [[WIDE_LOAD_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 288), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 304), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 288), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_9:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 304), align 16 +; CHECK-NEXT: store <2 x double> [[TMP16]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 256), align 16 +; CHECK-NEXT: store <2 x double> [[TMP17]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 272), align 16 +; CHECK-NEXT: [[WIDE_LOAD_9:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 288), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_9:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 304), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_9:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 288), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_9:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 304), align 16 ; CHECK-NEXT: [[TMP18:%.*]] = fadd <2 x double> [[WIDE_LOAD_9]], [[WIDE_LOAD4_9]] ; CHECK-NEXT: [[TMP19:%.*]] = fadd <2 x double> [[WIDE_LOAD3_9]], [[WIDE_LOAD5_9]] -; CHECK-NEXT: store <2 x double> [[TMP18]], ptr getelementptr inbounds (i8, ptr @a, i64 288), align 16 -; CHECK-NEXT: store <2 x double> [[TMP19]], ptr getelementptr inbounds (i8, ptr @a, i64 304), align 16 -; CHECK-NEXT: [[WIDE_LOAD_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 320), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 336), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 320), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_10:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 336), align 16 +; CHECK-NEXT: store <2 x double> [[TMP18]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 288), align 16 +; CHECK-NEXT: store <2 x double> [[TMP19]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 304), align 16 +; CHECK-NEXT: [[WIDE_LOAD_10:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 320), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_10:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 336), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_10:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 320), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_10:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 336), align 16 ; CHECK-NEXT: [[TMP20:%.*]] = fadd <2 x double> [[WIDE_LOAD_10]], [[WIDE_LOAD4_10]] ; CHECK-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[WIDE_LOAD3_10]], [[WIDE_LOAD5_10]] -; CHECK-NEXT: store <2 x double> 
[[TMP20]], ptr getelementptr inbounds (i8, ptr @a, i64 320), align 16 -; CHECK-NEXT: store <2 x double> [[TMP21]], ptr getelementptr inbounds (i8, ptr @a, i64 336), align 16 -; CHECK-NEXT: [[WIDE_LOAD_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 352), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 368), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 352), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_11:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 368), align 16 +; CHECK-NEXT: store <2 x double> [[TMP20]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 320), align 16 +; CHECK-NEXT: store <2 x double> [[TMP21]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 336), align 16 +; CHECK-NEXT: [[WIDE_LOAD_11:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 352), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_11:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 368), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_11:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 352), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_11:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 368), align 16 ; CHECK-NEXT: [[TMP22:%.*]] = fadd <2 x double> [[WIDE_LOAD_11]], [[WIDE_LOAD4_11]] ; CHECK-NEXT: [[TMP23:%.*]] = fadd <2 x double> [[WIDE_LOAD3_11]], [[WIDE_LOAD5_11]] -; CHECK-NEXT: store <2 x double> [[TMP22]], ptr getelementptr inbounds (i8, ptr @a, i64 352), align 16 -; CHECK-NEXT: store <2 x double> [[TMP23]], ptr getelementptr inbounds (i8, ptr @a, i64 368), align 16 -; CHECK-NEXT: [[WIDE_LOAD_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 384), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 400), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 384), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_12:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 400), align 16 +; CHECK-NEXT: store <2 x double> [[TMP22]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 352), align 16 +; CHECK-NEXT: store <2 x double> [[TMP23]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 368), align 16 +; CHECK-NEXT: [[WIDE_LOAD_12:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 384), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_12:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 400), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_12:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 384), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_12:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 400), align 16 ; CHECK-NEXT: [[TMP24:%.*]] = fadd <2 x double> [[WIDE_LOAD_12]], [[WIDE_LOAD4_12]] ; CHECK-NEXT: [[TMP25:%.*]] = fadd <2 x double> [[WIDE_LOAD3_12]], [[WIDE_LOAD5_12]] -; CHECK-NEXT: store <2 x double> [[TMP24]], ptr getelementptr inbounds (i8, ptr @a, i64 384), align 16 -; CHECK-NEXT: store <2 x double> [[TMP25]], ptr getelementptr inbounds (i8, ptr @a, i64 400), align 16 -; CHECK-NEXT: [[WIDE_LOAD_13:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 416), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_13:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 432), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_13:%.*]] = load <2 x double>, ptr 
getelementptr inbounds (i8, ptr @c, i64 416), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_13:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 432), align 16 +; CHECK-NEXT: store <2 x double> [[TMP24]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 384), align 16 +; CHECK-NEXT: store <2 x double> [[TMP25]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 400), align 16 +; CHECK-NEXT: [[WIDE_LOAD_13:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 416), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_13:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 432), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_13:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 416), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_13:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 432), align 16 ; CHECK-NEXT: [[TMP26:%.*]] = fadd <2 x double> [[WIDE_LOAD_13]], [[WIDE_LOAD4_13]] ; CHECK-NEXT: [[TMP27:%.*]] = fadd <2 x double> [[WIDE_LOAD3_13]], [[WIDE_LOAD5_13]] -; CHECK-NEXT: store <2 x double> [[TMP26]], ptr getelementptr inbounds (i8, ptr @a, i64 416), align 16 -; CHECK-NEXT: store <2 x double> [[TMP27]], ptr getelementptr inbounds (i8, ptr @a, i64 432), align 16 -; CHECK-NEXT: [[WIDE_LOAD_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 448), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @b, i64 464), align 16 -; CHECK-NEXT: [[WIDE_LOAD4_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 448), align 16 -; CHECK-NEXT: [[WIDE_LOAD5_14:%.*]] = load <2 x double>, ptr getelementptr inbounds (i8, ptr @c, i64 464), align 16 +; CHECK-NEXT: store <2 x double> [[TMP26]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 416), align 16 +; CHECK-NEXT: store <2 x double> [[TMP27]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 432), align 16 +; CHECK-NEXT: [[WIDE_LOAD_14:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 448), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_14:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @b, i64 464), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_14:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 448), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_14:%.*]] = load <2 x double>, ptr getelementptr inbounds nuw (i8, ptr @c, i64 464), align 16 ; CHECK-NEXT: [[TMP28:%.*]] = fadd <2 x double> [[WIDE_LOAD_14]], [[WIDE_LOAD4_14]] ; CHECK-NEXT: [[TMP29:%.*]] = fadd <2 x double> [[WIDE_LOAD3_14]], [[WIDE_LOAD5_14]] -; CHECK-NEXT: store <2 x double> [[TMP28]], ptr getelementptr inbounds (i8, ptr @a, i64 448), align 16 -; CHECK-NEXT: store <2 x double> [[TMP29]], ptr getelementptr inbounds (i8, ptr @a, i64 464), align 16 +; CHECK-NEXT: store <2 x double> [[TMP28]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 448), align 16 +; CHECK-NEXT: store <2 x double> [[TMP29]], ptr getelementptr inbounds nuw (i8, ptr @a, i64 464), align 16 ; CHECK-NEXT: [[TMP30:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @b, i64 480), align 16 ; CHECK-NEXT: [[TMP31:%.*]] = load double, ptr getelementptr inbounds nuw (i8, ptr @c, i64 480), align 16 ; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP30]], [[TMP31]] @@ -179,17 +179,17 @@ define void @test_runtime_trip_count(i32 %N) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP0:%.*]] = 
getelementptr inbounds [58 x double], ptr @b, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw [58 x double], ptr @b, i64 0, i64 [[INDEX]] ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP0]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP0]], align 16 ; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x double>, ptr [[TMP1]], align 16 -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [58 x double], ptr @c, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw [58 x double], ptr @c, i64 0, i64 [[INDEX]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP2]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr [[TMP2]], align 16 ; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <2 x double>, ptr [[TMP3]], align 16 ; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[WIDE_LOAD]], [[WIDE_LOAD5]] ; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[WIDE_LOAD4]], [[WIDE_LOAD6]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [58 x double], ptr @a, i64 0, i64 [[INDEX]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw [58 x double], ptr @a, i64 0, i64 [[INDEX]] ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP6]], i64 16 ; CHECK-NEXT: store <2 x double> [[TMP4]], ptr [[TMP6]], align 16 ; CHECK-NEXT: store <2 x double> [[TMP5]], ptr [[TMP7]], align 16 diff --git a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll index 61938d01e57ac..eac438ac07252 100644 --- a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll +++ b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll @@ -349,12 +349,12 @@ define void @good_load_order() { ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY3]] ] ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[INDVARS_IV]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], 1 -; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP3]] +; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP3]] ; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP4]] +; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP4]] ; CHECK-NEXT: [[TMP5:%.*]] = trunc i64 [[INDVARS_IV]] to i32 ; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], 4 -; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP6]] +; CHECK-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP6]] ; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 ; CHECK-NEXT: [[TMP8:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <4 x i32> @@ -363,7 +363,7 @@ define void @good_load_order() { ; CHECK-NEXT: store <4 x float> [[TMP11]], ptr [[ARRAYIDX5]], align 4 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 5 ; CHECK-NEXT: [[TMP12:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP12]] +; CHECK-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP12]] ; CHECK-NEXT: [[TMP13]] = load float, ptr [[ARRAYIDX41]], align 4 ; 
CHECK-NEXT: [[MUL45:%.*]] = fmul float [[TMP13]], [[TMP7]] ; CHECK-NEXT: store float [[MUL45]], ptr [[ARRAYIDX31]], align 4 @@ -384,12 +384,12 @@ define void @good_load_order() { ; SSE2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY3]] ] ; SSE2-NEXT: [[TMP2:%.*]] = trunc i64 [[INDVARS_IV]] to i32 ; SSE2-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], 1 -; SSE2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP3]] +; SSE2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP3]] ; SSE2-NEXT: [[TMP4:%.*]] = trunc i64 [[INDVARS_IV]] to i32 -; SSE2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP4]] +; SSE2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP4]] ; SSE2-NEXT: [[TMP5:%.*]] = trunc i64 [[INDVARS_IV]] to i32 ; SSE2-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], 4 -; SSE2-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP6]] +; SSE2-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP6]] ; SSE2-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX31]], align 4 ; SSE2-NEXT: [[TMP8:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4 ; SSE2-NEXT: [[TMP9:%.*]] = shufflevector <4 x float> [[TMP8]], <4 x float> poison, <4 x i32> @@ -398,7 +398,7 @@ define void @good_load_order() { ; SSE2-NEXT: store <4 x float> [[TMP11]], ptr [[ARRAYIDX5]], align 4 ; SSE2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 5 ; SSE2-NEXT: [[TMP12:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; SSE2-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [32000 x float], ptr @a, i32 0, i32 [[TMP12]] +; SSE2-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds nuw [32000 x float], ptr @a, i32 0, i32 [[TMP12]] ; SSE2-NEXT: [[TMP13]] = load float, ptr [[ARRAYIDX41]], align 4 ; SSE2-NEXT: [[MUL45:%.*]] = fmul float [[TMP13]], [[TMP7]] ; SSE2-NEXT: store float [[MUL45]], ptr [[ARRAYIDX31]], align 4
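For reference, the rewrite exercised by all of the test updates above reduces to a minimal pair. This is an illustrative sketch, not part of the patch; the names @g, @before, and @after are made up:

; A GEP whose base is the start of an object (here the global @g) and which
; carries inbounds: a negative index would already leave the object and make
; the result poison, so the index must be non-negative.
@g = global [16 x i32] zeroinitializer

define ptr @before(i64 %i) {
  %p = getelementptr inbounds [16 x i32], ptr @g, i64 0, i64 %i
  ret ptr %p
}

; After InstCombine, the same GEP additionally carries nuw, since the offset
; computation cannot wrap for a non-negative index.
define ptr @after(i64 %i) {
  %p = getelementptr inbounds nuw [16 x i32], ptr @g, i64 0, i64 %i
  ret ptr %p
}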