diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse-output.ll b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse-output.ll
new file mode 100644
index 0000000000000..55a969b7c9e76
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/riscv-vector-reverse-output.ll
@@ -0,0 +1,441 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
+;; This is the loop in C++ being vectorized in this file with
+;; vector.reverse
+;;  #pragma clang loop vectorize_width(4, scalable)
+;;  for (int i = N-1; i >= 0; --i)
+;;    a[i] = b[i] + 1.0;
+
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v \
+; RUN:   -riscv-v-vector-bits-min=128 -S < %s \
+; RUN:   | FileCheck --check-prefix=RV64 %s
+
+; RUN: opt -passes=loop-vectorize -mtriple=riscv32 -mattr=+v \
+; RUN:   -riscv-v-vector-bits-min=128 -S < %s \
+; RUN:   | FileCheck --check-prefix=RV32 %s
+
+; RUN: opt -passes=loop-vectorize -mtriple=riscv64 -mattr=+v \
+; RUN:   -riscv-v-vector-bits-min=128 -force-vector-interleave=2 -S < %s \
+; RUN:   | FileCheck --check-prefix=RV64-UF2 %s
+
+define void @vector_reverse_i32(ptr noalias %A, ptr noalias %B) {
+; RV64-LABEL: define void @vector_reverse_i32(
+; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; RV64-NEXT: [[ENTRY:.*]]:
+; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64: [[VECTOR_PH]]:
+; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64: [[VECTOR_BODY]]:
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV64-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 0
+; RV64-NEXT: [[TMP8:%.*]] = add nsw i64 [[TMP7]], -1
+; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP8]]
+; RV64-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
+; RV64-NEXT: [[TMP11:%.*]] = sub i64 1, [[TMP5]]
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]]
+; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP12]], i64 [[TMP11]]
+; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]]
+; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
+; RV64-NEXT: [[TMP17:%.*]] = sub i64 1, [[TMP5]]
+; RV64-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
+; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP18]], i64 [[TMP17]]
+; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP14]])
+; RV64-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP19]], align 4
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; RV64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64: [[MIDDLE_BLOCK]]:
+; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; RV64: [[SCALAR_PH]]:
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-NEXT: br label %[[FOR_BODY:.*]]
+; RV64: [[FOR_BODY]]:
+; RV64-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; RV64-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
+; RV64-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV_NEXT]]
+; RV64-NEXT: [[TMP21:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
+; RV64-NEXT: [[ADD:%.*]] = add i32 [[TMP21]], 1
+; RV64-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV_NEXT]]
+; RV64-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_A]], align 4
+; RV64-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
+; RV64-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; RV64: [[EXIT]]:
+; RV64-NEXT: ret void
+;
+; RV32-LABEL: define void @vector_reverse_i32(
+; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; RV32-NEXT: [[ENTRY:.*]]:
+; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV32: [[VECTOR_PH]]:
+; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
+; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV32: [[VECTOR_BODY]]:
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV32-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 0
+; RV32-NEXT: [[TMP8:%.*]] = add nsw i64 [[TMP7]], -1
+; RV32-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP8]]
+; RV32-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
+; RV32-NEXT: [[TMP11:%.*]] = mul i32 0, [[TMP10]]
+; RV32-NEXT: [[TMP12:%.*]] = sub i32 1, [[TMP10]]
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 [[TMP11]]
+; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 [[TMP12]]
+; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP8]]
+; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
+; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
+; RV32-NEXT: [[TMP19:%.*]] = sub i32 1, [[TMP17]]
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i32 [[TMP18]]
+; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP15]])
+; RV32-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP21]], align 4
+; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; RV32-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV32-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV32: [[MIDDLE_BLOCK]]:
+; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV32-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; RV32: [[SCALAR_PH]]:
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV32-NEXT: br label %[[FOR_BODY:.*]]
+; RV32: [[FOR_BODY]]:
+; RV32-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; RV32-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
+; RV32-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV_NEXT]]
+; RV32-NEXT: [[TMP23:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
+; RV32-NEXT: [[ADD:%.*]] = add i32 [[TMP23]], 1
+; RV32-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV_NEXT]]
+; RV32-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_A]], align 4
+; RV32-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
+; RV32-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; RV32: [[EXIT]]:
+; RV32-NEXT: ret void
+;
+; RV64-UF2-LABEL: define void @vector_reverse_i32(
+; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; RV64-UF2-NEXT: [[ENTRY:.*]]:
+; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
+; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64-UF2: [[VECTOR_PH]]:
+; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8
+; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64-UF2: [[VECTOR_BODY]]:
+; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV64-UF2-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 0
+; RV64-UF2-NEXT: [[TMP9:%.*]] = add nsw i64 [[TMP8]], -1
+; RV64-UF2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP9]]
+; RV64-UF2-NEXT: [[TMP11:%.*]] = mul i64 0, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP12:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i64 [[TMP11]]
+; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]]
+; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP10]], i64 [[TMP15]]
+; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 [[TMP16]]
+; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP18]], align 4
+; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD1]])
+; RV64-UF2-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[REVERSE]], splat (i32 1)
+; RV64-UF2-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[REVERSE2]], splat (i32 1)
+; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP9]]
+; RV64-UF2-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP23:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP22]]
+; RV64-UF2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[TMP24]], i64 [[TMP23]]
+; RV64-UF2-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[TMP21]], i64 [[TMP26]]
+; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[TMP28]], i64 [[TMP27]]
+; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP19]])
+; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE3]], ptr [[TMP25]], align 4
+; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[TMP20]])
+; RV64-UF2-NEXT: store <vscale x 4 x i32> [[REVERSE4]], ptr [[TMP29]], align 4
+; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; RV64-UF2-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; RV64-UF2: [[MIDDLE_BLOCK]]:
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; RV64-UF2: [[SCALAR_PH]]:
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
+; RV64-UF2: [[FOR_BODY]]:
+; RV64-UF2-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; RV64-UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
+; RV64-UF2-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV_NEXT]]
+; RV64-UF2-NEXT: [[TMP31:%.*]] = load i32, ptr [[ARRAYIDX_B]], align 4
+; RV64-UF2-NEXT: [[ADD:%.*]] = add i32 [[TMP31]], 1
+; RV64-UF2-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV_NEXT]]
+; RV64-UF2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX_A]], align 4
+; RV64-UF2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
+; RV64-UF2-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; RV64-UF2: [[EXIT]]:
+; RV64-UF2-NEXT: ret void
+;
+entry:
+  br label %for.body
+
+for.body:
+  %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
+  %iv.next = add nsw i64 %dec.iv, -1
+  %arrayidx.b = getelementptr inbounds i32, ptr %B, i64 %iv.next
+  %0 = load i32, ptr %arrayidx.b, align 4
+  %add = add i32 %0, 1
+  %arrayidx.a = getelementptr inbounds i32, ptr %A, i64 %iv.next
+  store i32 %add, ptr %arrayidx.a, align 4
+  %cmp = icmp ugt i64 %dec.iv, 1
+  br i1 %cmp, label %for.body, label %exit, !llvm.loop !0
+
+exit:
+  ret void
+}
+
+define void @vector_reverse_f32(ptr noalias %A, ptr noalias %B) {
+; RV64-LABEL: define void @vector_reverse_f32(
+; RV64-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
+; RV64-NEXT: [[ENTRY:.*]]:
+; RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; RV64-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV64-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64: [[VECTOR_PH]]:
+; RV64-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; RV64-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV64-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; RV64-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64: [[VECTOR_BODY]]:
+; RV64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV64-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 0
+; RV64-NEXT: [[TMP8:%.*]] = add nsw i64 [[TMP7]], -1
+; RV64-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP8]]
+; RV64-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
+; RV64-NEXT: [[TMP11:%.*]] = sub i64 1, [[TMP5]]
+; RV64-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i64 [[TMP10]]
+; RV64-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i64 [[TMP11]]
+; RV64-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP13]], align 4
+; RV64-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-NEXT: [[TMP14:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
+; RV64-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]]
+; RV64-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
+; RV64-NEXT: [[TMP17:%.*]] = sub i64 1, [[TMP5]]
+; RV64-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP15]], i64 [[TMP16]]
+; RV64-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP18]], i64 [[TMP17]]
+; RV64-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP14]])
+; RV64-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP19]], align 4
+; RV64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; RV64-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV64: [[MIDDLE_BLOCK]]:
+; RV64-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; RV64: [[SCALAR_PH]]:
+; RV64-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-NEXT: br label %[[FOR_BODY:.*]]
+; RV64: [[FOR_BODY]]:
+; RV64-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; RV64-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
+; RV64-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV_NEXT]]
+; RV64-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX_B]], align 4
+; RV64-NEXT: [[FADD:%.*]] = fadd float [[TMP21]], 1.000000e+00
+; RV64-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV_NEXT]]
+; RV64-NEXT: store float [[FADD]], ptr [[ARRAYIDX_A]], align 4
+; RV64-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
+; RV64-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; RV64: [[EXIT]]:
+; RV64-NEXT: ret void
+;
+; RV32-LABEL: define void @vector_reverse_f32(
+; RV32-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
+; RV32-NEXT: [[ENTRY:.*]]:
+; RV32-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; RV32-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV32-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV32: [[VECTOR_PH]]:
+; RV32-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; RV32-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV32-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV32-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV32-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; RV32-NEXT: [[TMP6:%.*]] = sub i64 1023, [[N_VEC]]
+; RV32-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV32: [[VECTOR_BODY]]:
+; RV32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV32-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV32-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 0
+; RV32-NEXT: [[TMP8:%.*]] = add nsw i64 [[TMP7]], -1
+; RV32-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP8]]
+; RV32-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP5]] to i32
+; RV32-NEXT: [[TMP11:%.*]] = mul i32 0, [[TMP10]]
+; RV32-NEXT: [[TMP12:%.*]] = sub i32 1, [[TMP10]]
+; RV32-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP9]], i32 [[TMP11]]
+; RV32-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i32 [[TMP12]]
+; RV32-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
+; RV32-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV32-NEXT: [[TMP15:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
+; RV32-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP8]]
+; RV32-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP5]] to i32
+; RV32-NEXT: [[TMP18:%.*]] = mul i32 0, [[TMP17]]
+; RV32-NEXT: [[TMP19:%.*]] = sub i32 1, [[TMP17]]
+; RV32-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP16]], i32 [[TMP18]]
+; RV32-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP20]], i32 [[TMP19]]
+; RV32-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP15]])
+; RV32-NEXT: store <vscale x 4 x float> [[REVERSE1]], ptr [[TMP21]], align 4
+; RV32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; RV32-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV32-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV32: [[MIDDLE_BLOCK]]:
+; RV32-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV32-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; RV32: [[SCALAR_PH]]:
+; RV32-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV32-NEXT: br label %[[FOR_BODY:.*]]
+; RV32: [[FOR_BODY]]:
+; RV32-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; RV32-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
+; RV32-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV_NEXT]]
+; RV32-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX_B]], align 4
+; RV32-NEXT: [[FADD:%.*]] = fadd float [[TMP23]], 1.000000e+00
+; RV32-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV_NEXT]]
+; RV32-NEXT: store float [[FADD]], ptr [[ARRAYIDX_A]], align 4
+; RV32-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
+; RV32-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; RV32: [[EXIT]]:
+; RV32-NEXT: ret void
+;
+; RV64-UF2-LABEL: define void @vector_reverse_f32(
+; RV64-UF2-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]]) #[[ATTR0]] {
+; RV64-UF2-NEXT: [[ENTRY:.*]]:
+; RV64-UF2-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 8
+; RV64-UF2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1023, [[TMP1]]
+; RV64-UF2-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; RV64-UF2: [[VECTOR_PH]]:
+; RV64-UF2-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 8
+; RV64-UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 1023, [[TMP3]]
+; RV64-UF2-NEXT: [[N_VEC:%.*]] = sub i64 1023, [[N_MOD_VF]]
+; RV64-UF2-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; RV64-UF2-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; RV64-UF2-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2
+; RV64-UF2-NEXT: [[TMP7:%.*]] = sub i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; RV64-UF2: [[VECTOR_BODY]]:
+; RV64-UF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; RV64-UF2-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
+; RV64-UF2-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 0
+; RV64-UF2-NEXT: [[TMP9:%.*]] = add nsw i64 [[TMP8]], -1
+; RV64-UF2-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP9]]
+; RV64-UF2-NEXT: [[TMP11:%.*]] = mul i64 0, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP12:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[TMP11]]
+; RV64-UF2-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[TMP13]], i64 [[TMP12]]
+; RV64-UF2-NEXT: [[TMP15:%.*]] = mul i64 -1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP16:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP10]], i64 [[TMP15]]
+; RV64-UF2-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP17]], i64 [[TMP16]]
+; RV64-UF2-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP14]], align 4
+; RV64-UF2-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD]])
+; RV64-UF2-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x float>, ptr [[TMP18]], align 4
+; RV64-UF2-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[WIDE_LOAD1]])
+; RV64-UF2-NEXT: [[TMP19:%.*]] = fadd <vscale x 4 x float> [[REVERSE]], splat (float 1.000000e+00)
+; RV64-UF2-NEXT: [[TMP20:%.*]] = fadd <vscale x 4 x float> [[REVERSE2]], splat (float 1.000000e+00)
+; RV64-UF2-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP9]]
+; RV64-UF2-NEXT: [[TMP22:%.*]] = mul i64 0, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP23:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[TMP22]]
+; RV64-UF2-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i64 [[TMP23]]
+; RV64-UF2-NEXT: [[TMP26:%.*]] = mul i64 -1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP5]]
+; RV64-UF2-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP21]], i64 [[TMP26]]
+; RV64-UF2-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[TMP28]], i64 [[TMP27]]
+; RV64-UF2-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP19]])
+; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE3]], ptr [[TMP25]], align 4
+; RV64-UF2-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x float> @llvm.vector.reverse.nxv4f32(<vscale x 4 x float> [[TMP20]])
+; RV64-UF2-NEXT: store <vscale x 4 x float> [[REVERSE4]], ptr [[TMP29]], align 4
+; RV64-UF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
+; RV64-UF2-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[TMP30]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; RV64-UF2: [[MIDDLE_BLOCK]]:
+; RV64-UF2-NEXT: [[CMP_N:%.*]] = icmp eq i64 1023, [[N_VEC]]
+; RV64-UF2-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; RV64-UF2: [[SCALAR_PH]]:
+; RV64-UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 1023, %[[ENTRY]] ]
+; RV64-UF2-NEXT: br label %[[FOR_BODY:.*]]
+; RV64-UF2: [[FOR_BODY]]:
+; RV64-UF2-NEXT: [[DEC_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; RV64-UF2-NEXT: [[IV_NEXT]] = add nsw i64 [[DEC_IV]], -1
+; RV64-UF2-NEXT: [[ARRAYIDX_B:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV_NEXT]]
+; RV64-UF2-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX_B]], align 4
+; RV64-UF2-NEXT: [[FADD:%.*]] = fadd float [[TMP31]], 1.000000e+00
+; RV64-UF2-NEXT: [[ARRAYIDX_A:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV_NEXT]]
+; RV64-UF2-NEXT: store float [[FADD]], ptr [[ARRAYIDX_A]], align 4
+; RV64-UF2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[DEC_IV]], 1
+; RV64-UF2-NEXT: br i1 [[CMP]], label %[[FOR_BODY]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; RV64-UF2: [[EXIT]]:
+; RV64-UF2-NEXT: ret void
+;
+entry:
+  br label %for.body
+
+for.body:
+  %dec.iv = phi i64 [ 1023, %entry ], [ %iv.next, %for.body ]
+  %iv.next = add nsw i64 %dec.iv, -1
+  %arrayidx.b = getelementptr inbounds float, ptr %B, i64 %iv.next
+  %0 = load float, ptr %arrayidx.b, align 4
+  %fadd = fadd float %0, 1.000000e+00
+  %arrayidx.a = getelementptr inbounds float, ptr %A, i64 %iv.next
+  store float %fadd, ptr %arrayidx.a, align 4
+  %cmp = icmp ugt i64 %dec.iv, 1
+  br i1 %cmp, label %for.body, label %exit, !llvm.loop !0
+
+exit:
+  ret void
+}
+
+!0 = distinct !{!0, !1, !2, !3}
+!1 = !{!"llvm.loop.vectorize.width", i32 4}
+!2 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
+!3 = !{!"llvm.loop.vectorize.enable", i1 true}