From 0c7d7228c8b45ab0f0e3a337c646f5826bd99c43 Mon Sep 17 00:00:00 2001 From: Min-Yih Hsu Date: Wed, 15 Jan 2025 10:34:51 -0800 Subject: [PATCH 1/2] Pre-commit tests --- llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll | 98 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll | 105 ++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vse.ll | 18 ++++ llvm/test/CodeGen/RISCV/rvv/vsse.ll | 19 ++++ llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll | 91 +++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll | 19 ++++ llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll | 91 +++++++++++++++++ 7 files changed, 441 insertions(+) diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll index 809ae2d2bebfe..8efdd8489b016 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -29,6 +29,20 @@ entry: ret %1 } +define @test_vlseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl) { +; CHECK-LABEL: test_vlseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) declare target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64, i64) @@ -191,6 +205,20 @@ entry: ret %1 } +define @test_vlseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv2i8_3t(target("riscv.vector.tuple", , 3), ptr, i64, i64) declare target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv2i8_3t.nxv2i1(target("riscv.vector.tuple", , 3), ptr, , i64, i64, i64) @@ -326,6 +354,20 @@ entry: ret %1 } +define @test_vlseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) 
@llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) declare target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64, i64) @@ -461,6 +503,20 @@ entry: ret %1 } +define @test_vlseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) declare target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64, i64) @@ -569,6 +625,20 @@ entry: ret %1 } +define @test_vlseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) declare target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64, i64) @@ -677,6 +747,20 @@ entry: ret %1 } +define @test_vlseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 7) 
@llvm.riscv.vlseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) declare target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64, i64) @@ -785,6 +869,20 @@ entry: ret %1 } +define @test_vlseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vlseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) declare target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll index 3b9db2655e033..896866fb60ebc 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -31,6 +31,21 @@ entry: ret %1 } +define @test_vluxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 2) undef, ptr %base, %index, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) declare target("riscv.vector.tuple", , 2) @llvm.riscv.vluxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64, i64) @@ -640,6 +655,21 @@ entry: ret %1 } +define @test_vluxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 3) undef, ptr %base, %index, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_3t(target("riscv.vector.tuple", , 3) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) declare target("riscv.vector.tuple", , 3) @llvm.riscv.vluxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64, i64) @@ -1191,6 +1221,21 @@ entry: ret %1 } +define @test_vluxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 4) undef, ptr %base, %index, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) declare target("riscv.vector.tuple", , 4) @llvm.riscv.vluxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64, i64) @@ -1742,6 +1787,21 @@ entry: ret %1 } +define @test_vluxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 5) undef, ptr %base, %index, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) declare target("riscv.vector.tuple", , 5) @llvm.riscv.vluxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64, i64) @@ -2206,6 +2266,21 @@ entry: ret %1 } +define @test_vluxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 6) undef, ptr %base, %index, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call 
@llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) declare target("riscv.vector.tuple", , 6) @llvm.riscv.vluxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64, i64) @@ -2670,6 +2745,21 @@ entry: ret %1 } +define @test_vluxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 7) undef, ptr %base, %index, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) declare target("riscv.vector.tuple", , 7) @llvm.riscv.vluxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 7), ptr, , , i64, i64, i64) @@ -3134,6 +3224,21 @@ entry: ret %1 } +define @test_vluxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vluxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1.nxv1i8(target("riscv.vector.tuple", , 8) undef, ptr %base, %index, splat (i1 true), i64 %vl, i64 1, i64 3) + %1 = call @llvm.riscv.tuple.extract.nxv1i8.triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %0, i32 1) + ret %1 +} + declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", , 8), ptr, , i64, i64) declare target("riscv.vector.tuple", , 8) @llvm.riscv.vluxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 8), ptr, , , i64, i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vse.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll index f2ae2136078c0..416bfbdd95f3b 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll @@ -46,6 +46,24 @@ entry: ret void } +define void @intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vse.mask.nxv1i64( + %0, + ptr %1, + splat (i1 true), + iXLen %3) + + ret void +} + declare void 
@llvm.riscv.vse.nxv2i64( , ptr, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/rvv/vsse.ll index 6908a2000653b..e2eb018954d29 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll @@ -50,6 +50,25 @@ entry: ret void } +define void @intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a3, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsse.mask.nxv1i64( + %0, + ptr %1, + iXLen %2, + splat (i1 true), + iXLen %4) + + ret void +} + declare void @llvm.riscv.vsse.nxv2i64( , ptr, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll index 6ce326be23ee3..b4349200db8db 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll @@ -27,6 +27,19 @@ entry: ret void } +define void @test_vsseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv2i8_2t(target("riscv.vector.tuple", , 2), ptr, i64, i64) declare void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv2i8_2t.nxv2i1(target("riscv.vector.tuple", , 2), ptr, , i64, i64) @@ -277,6 +290,19 @@ entry: ret void } +define void @test_vsseg3_allonesmask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg3_allonesmask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma +; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) @@ -302,6 +328,19 @@ entry: ret void } +define void @test_vsseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void 
@llvm.riscv.vsseg4.triscv.vector.tuple_nxv2i8_4t(target("riscv.vector.tuple", , 4), ptr, i64, i64) declare void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv2i8_4t.nxv2i1(target("riscv.vector.tuple", , 4), ptr, , i64, i64) @@ -427,6 +466,19 @@ entry: ret void } +define void @test_vsseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsseg5.triscv.vector.tuple_nxv2i8_5t(target("riscv.vector.tuple", , 5), ptr, i64, i64) declare void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv2i8_5t.nxv2i1(target("riscv.vector.tuple", , 5), ptr, , i64, i64) @@ -527,6 +579,19 @@ entry: ret void } +define void @test_vsseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsseg6.triscv.vector.tuple_nxv2i8_6t(target("riscv.vector.tuple", , 6), ptr, i64, i64) declare void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv2i8_6t.nxv2i1(target("riscv.vector.tuple", , 6), ptr, , i64, i64) @@ -627,6 +692,19 @@ entry: ret void } +define void @test_vsseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsseg7.triscv.vector.tuple_nxv2i8_7t(target("riscv.vector.tuple", , 7), ptr, i64, i64) declare void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv2i8_7t.nxv2i1(target("riscv.vector.tuple", , 7), ptr, , i64, i64) @@ -727,6 +805,19 @@ entry: ret void } +define void @test_vsseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { +; CHECK-LABEL: test_vsseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, splat (i1 true), i64 %vl, 
i64 3) + ret void +} + declare void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv2i8_8t(target("riscv.vector.tuple", , 8), ptr, i64, i64) declare void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv2i8_8t.nxv2i1(target("riscv.vector.tuple", , 8), ptr, , i64, i64) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll index 3572f5909400a..064a0437edd53 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll @@ -50,6 +50,25 @@ entry: ret void } +define void @intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: ret +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( + %0, + ptr %1, + %2, + splat (i1 true), + i64 %4) + + ret void +} + declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( , ptr, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll index 22be2ebca8fde..68ecc1f7ba7ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -27,6 +27,19 @@ entry: ret void } +define void @test_vsuxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsuxseg2.triscv.vector.tuple_nxv1i8_2t.nxv1i16(target("riscv.vector.tuple", , 2), ptr, , i64, i64) declare void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 2), ptr, , , i64, i64) @@ -552,6 +565,19 @@ entry: ret void } +define void @test_vsuxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsuxseg3.triscv.vector.tuple_nxv1i8_3t.nxv1i16(target("riscv.vector.tuple", , 3), ptr, , i64, i64) declare void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 3), ptr, , , i64, i64) @@ -1027,6 +1053,19 @@ entry: ret void } +define void @test_vsuxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: 
test_vsuxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsuxseg4.triscv.vector.tuple_nxv1i8_4t.nxv1i16(target("riscv.vector.tuple", , 4), ptr, , i64, i64) declare void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 4), ptr, , , i64, i64) @@ -1502,6 +1541,19 @@ entry: ret void } +define void @test_vsuxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsuxseg5.triscv.vector.tuple_nxv1i8_5t.nxv1i16(target("riscv.vector.tuple", , 5), ptr, , i64, i64) declare void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 5), ptr, , , i64, i64) @@ -1902,6 +1954,19 @@ entry: ret void } +define void @test_vsuxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsuxseg6.triscv.vector.tuple_nxv1i8_6t.nxv1i16(target("riscv.vector.tuple", , 6), ptr, , i64, i64) declare void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i16.nxv1i1(target("riscv.vector.tuple", , 6), ptr, , , i64, i64) @@ -2302,6 +2367,19 @@ entry: ret void } +define void @test_vsuxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { +; CHECK-LABEL: test_vsuxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: ret +entry: + tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) + ret void +} + declare void @llvm.riscv.vsuxseg7.triscv.vector.tuple_nxv1i8_7t.nxv1i16(target("riscv.vector.tuple", , 7), ptr, , i64, i64) declare void 
@llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 7), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
@@ -2702,6 +2780,19 @@ entry:
   ret void
 }
 
+define void @test_vsuxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vsuxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vsuxseg8ei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8) %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> splat (i1 true), i64 %vl, i64 3)
+  ret void
+}
+
 declare void @llvm.riscv.vsuxseg8.triscv.vector.tuple_nxv1i8_8t.nxv1i16(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i16>, i64, i64)
 declare void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i16.nxv1i1(target("riscv.vector.tuple", <vscale x 1 x i8>, 8), ptr, <vscale x 1 x i16>, <vscale x 1 x i1>, i64, i64)
 

From 4ad7a62a04838aaa7f4c4f9b43c704e3f4fa0539 Mon Sep 17 00:00:00 2001
From: Min-Yih Hsu
Date: Wed, 15 Jan 2025 10:26:17 -0800
Subject: [PATCH 2/2] [RISCV] Add missing RISCVMaskedPseudo on some RVV memory ops

So that we can turn masked operations with all-ones masks into their
unmasked counterparts.
---
 llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp | 14 ++++++----
 .../Target/RISCV/RISCVInstrInfoVPseudos.td  | 14 ++++++--
 llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll   | 28 +++++--------------
 llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll | 28 +++++--------------
 llvm/test/CodeGen/RISCV/rvv/vse.ll          |  4 +--
 llvm/test/CodeGen/RISCV/rvv/vsse.ll         |  4 +--
 llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll   | 28 +++++--------------
 llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll  |  4 +--
 llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll | 28 +++++--------------
 9 files changed, 51 insertions(+), 101 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 9ccf95970e5b5..4603fc4109f1f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3791,13 +3791,14 @@ static bool isImplicitDef(SDValue V) {
   return V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
 }
 
-static bool hasGPROut(unsigned Opc) {
-  switch (RISCV::getRVVMCOpcode(Opc)) {
+static bool hasPassthru(const MCInstrDesc &MCID) {
+  switch (RISCV::getRVVMCOpcode(MCID.getOpcode())) {
   case RISCV::VCPOP_M:
   case RISCV::VFIRST_M:
-    return true;
+    return false;
   }
-  return false;
+  // None of the stores has a passthru operand.
+  return !MCID.mayStore();
 }
 
 // Optimize masked RVV pseudo instructions with a known all-ones mask to their
@@ -3829,8 +3830,9 @@ bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(MachineSDNode *N) {
 #endif
 
   SmallVector<SDValue, 8> Ops;
-  // Skip the passthru operand at index 0 if !UseTUPseudo and no GPR out.
-  bool ShouldSkip = !UseTUPseudo && !hasGPROut(Opc);
+  // Skip the passthru operand at index 0 if !UseTUPseudo and the instruction
+  // has a passthru operand.
+  bool ShouldSkip = !UseTUPseudo && hasPassthru(MCID);
   for (unsigned I = ShouldSkip, E = N->getNumOperands(); I != E; I++) {
     // Skip the mask, and the Glue.
SDValue Op = N->getOperand(I); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 21eedbb626846..268bfe70673a2 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -1932,6 +1932,7 @@ multiclass VPseudoUSStore { def "E" # eew # "_V_" # LInfo : VPseudoUSStoreNoMask, VSESched; def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSStoreMask, + RISCVMaskedPseudo, VSESched; } } @@ -1958,6 +1959,7 @@ multiclass VPseudoSStore { def "E" # eew # "_V_" # LInfo : VPseudoSStoreNoMask, VSSSched; def "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSStoreMask, + RISCVMaskedPseudo, VSSSched; } } @@ -1984,6 +1986,7 @@ multiclass VPseudoIStore { VSXSched; def "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" : VPseudoIStoreMask, + RISCVMaskedPseudo, VSXSched; } } @@ -3709,7 +3712,8 @@ multiclass VPseudoUSSegLoad { def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegLoadNoMask, VLSEGSched; def nf # "E" # eew # "_V_" # LInfo # "_MASK" : - VPseudoUSSegLoadMask, VLSEGSched; + VPseudoUSSegLoadMask, RISCVMaskedPseudo, + VLSEGSched; } } } @@ -3726,7 +3730,8 @@ multiclass VPseudoUSSegLoadFF { def nf # "E" # eew # "FF_V_" # LInfo : VPseudoUSSegLoadFFNoMask, VLSEGFFSched; def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" : - VPseudoUSSegLoadFFMask, VLSEGFFSched; + VPseudoUSSegLoadFFMask, RISCVMaskedPseudo, + VLSEGFFSched; } } } @@ -3743,6 +3748,7 @@ multiclass VPseudoSSegLoad { def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask, VLSSEGSched; def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask, + RISCVMaskedPseudo, VLSSEGSched; } } @@ -3773,6 +3779,7 @@ multiclass VPseudoISegLoad { def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" : VPseudoISegLoadMask, + RISCVMaskedPseudo, VLXSEGSched; } } @@ -3792,6 +3799,7 @@ multiclass VPseudoUSSegStore { def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegStoreNoMask, VSSEGSched; def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegStoreMask, + RISCVMaskedPseudo, VSSEGSched; } } @@ -3809,6 +3817,7 @@ multiclass VPseudoSSegStore { def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask, VSSSEGSched; def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask, + RISCVMaskedPseudo, VSSSEGSched; } } @@ -3839,6 +3848,7 @@ multiclass VPseudoISegStore { def nf # "EI" # idxEEW # "_V_" # IdxLInfo # "_" # DataLInfo # "_MASK" : VPseudoISegStoreMask, + RISCVMaskedPseudo, VSXSEGSched; } } diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll index 8efdd8489b016..16e5e7b9199a3 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll @@ -32,10 +32,8 @@ entry: define @test_vlseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(ptr %base, i64 %vl) { ; CHECK-LABEL: test_vlseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t +; CHECK-NEXT: vlseg2e8.v v7, (a0) ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 2) @llvm.riscv.vlseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -208,10 +206,8 @@ entry: define @test_vlseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: 
test_vlseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t +; CHECK-NEXT: vlseg3e8.v v7, (a0) ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 3) @llvm.riscv.vlseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i1(target("riscv.vector.tuple", , 3) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -357,10 +353,8 @@ entry: define @test_vlseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t +; CHECK-NEXT: vlseg4e8.v v7, (a0) ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 4) @llvm.riscv.vlseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -506,10 +500,8 @@ entry: define @test_vlseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t +; CHECK-NEXT: vlseg5e8.v v7, (a0) ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 5) @llvm.riscv.vlseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -628,10 +620,8 @@ entry: define @test_vlseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t +; CHECK-NEXT: vlseg6e8.v v7, (a0) ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 6) @llvm.riscv.vlseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -750,10 +740,8 @@ entry: define @test_vlseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t +; CHECK-NEXT: vlseg7e8.v v7, (a0) ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 7) @llvm.riscv.vlseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) @@ -872,10 +860,8 @@ entry: define @test_vlseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vlseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t +; CHECK-NEXT: 
vlseg8e8.v v7, (a0) ; CHECK-NEXT: ret entry: %0 = tail call target("riscv.vector.tuple", , 8) @llvm.riscv.vlseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) undef, ptr %base, splat (i1 true), i64 %vl, i64 1, i64 3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll index 896866fb60ebc..0f7348b474ee4 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll @@ -34,10 +34,8 @@ entry: define @test_vluxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vluxseg2ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -658,10 +656,8 @@ entry: define @test_vluxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vluxseg3ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -1224,10 +1220,8 @@ entry: define @test_vluxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vluxseg4ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -1790,10 +1784,8 @@ entry: define @test_vluxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vluxseg5ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -2269,10 +2261,8 @@ entry: define @test_vluxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vluxseg6ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -2748,10 +2738,8 @@ entry: define @test_vluxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8, v0.t +; 
CHECK-NEXT: vluxseg7ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: @@ -3227,10 +3215,8 @@ entry: define @test_vluxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vluxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8, v0.t +; CHECK-NEXT: vluxseg8ei8.v v9, (a0), v8 ; CHECK-NEXT: vmv1r.v v8, v10 ; CHECK-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/rvv/vse.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll index 416bfbdd95f3b..607ce2394ee81 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll @@ -49,10 +49,8 @@ entry: define void @intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64( %0, ptr %1, %2, iXLen %3) nounwind { ; CHECK-LABEL: intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma -; CHECK-NEXT: vse64.v v8, (a0), v0.t +; CHECK-NEXT: vse64.v v8, (a0) ; CHECK-NEXT: ret entry: call void @llvm.riscv.vse.mask.nxv1i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/rvv/vsse.ll index e2eb018954d29..770e06749c348 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsse.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll @@ -53,10 +53,8 @@ entry: define void @intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64( %0, ptr %1, iXLen %2, %3, iXLen %4) nounwind { ; CHECK-LABEL: intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a3, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma -; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t +; CHECK-NEXT: vsse64.v v8, (a0), a1 ; CHECK-NEXT: ret entry: call void @llvm.riscv.vsse.mask.nxv1i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll index b4349200db8db..880066bf45990 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll @@ -30,10 +30,8 @@ entry: define void @test_vsseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t(target("riscv.vector.tuple", , 2) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vsseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg2e8.v v8, (a0) ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) @@ -293,10 +291,8 @@ entry: define void @test_vsseg3_allonesmask_nxv16i8_triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", , 3) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vsseg3_allonesmask_nxv16i8_triscv.vector.tuple_nxv16i8_3t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, m2, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma -; CHECK-NEXT: vsseg3e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg3e8.v v8, (a0) ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg3.mask.triscv.vector.tuple_nxv16i8_3t.nxv16i1(target("riscv.vector.tuple", , 3) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) 
@@ -331,10 +327,8 @@ entry: define void @test_vsseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t(target("riscv.vector.tuple", , 4) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vsseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg4e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg4e8.v v8, (a0) ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) @@ -469,10 +463,8 @@ entry: define void @test_vsseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t(target("riscv.vector.tuple", , 5) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vsseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg5e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg5e8.v v8, (a0) ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i1(target("riscv.vector.tuple", , 5) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) @@ -582,10 +574,8 @@ entry: define void @test_vsseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t(target("riscv.vector.tuple", , 6) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vsseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg6e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg6e8.v v8, (a0) ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) @@ -695,10 +685,8 @@ entry: define void @test_vsseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t(target("riscv.vector.tuple", , 7) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vsseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg7e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg7e8.v v8, (a0) ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) @@ -808,10 +796,8 @@ entry: define void @test_vsseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t(target("riscv.vector.tuple", , 8) %val, ptr %base, i64 %vl, %mask) { ; CHECK-LABEL: test_vsseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsseg8e8.v v8, (a0), v0.t +; CHECK-NEXT: vsseg8e8.v v8, (a0) ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, splat (i1 true), i64 %vl, i64 3) diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll index 064a0437edd53..6ba2b405c943e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll @@ -53,10 +53,8 @@ 
entry: define void @intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64( %0, ptr %1, %2, %3, i64 %4) nounwind { ; CHECK-LABEL: intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t +; CHECK-NEXT: vsuxei64.v v8, (a0), v9 ; CHECK-NEXT: ret entry: call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll index 68ecc1f7ba7ca..b297d33611242 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll @@ -30,10 +30,8 @@ entry: define void @test_vsuxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vsuxseg2_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_2t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10, v0.t +; CHECK-NEXT: vsuxseg2ei8.v v8, (a0), v10 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg2.mask.triscv.vector.tuple_nxv1i8_2t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 2) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) @@ -568,10 +566,8 @@ entry: define void @test_vsuxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vsuxseg3_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_3t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11, v0.t +; CHECK-NEXT: vsuxseg3ei8.v v8, (a0), v11 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg3.mask.triscv.vector.tuple_nxv1i8_3t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 3) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) @@ -1056,10 +1052,8 @@ entry: define void @test_vsuxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vsuxseg4_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_4t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12, v0.t +; CHECK-NEXT: vsuxseg4ei8.v v8, (a0), v12 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg4.mask.triscv.vector.tuple_nxv1i8_4t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 4) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) @@ -1544,10 +1538,8 @@ entry: define void @test_vsuxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8(target("riscv.vector.tuple", , 5) %val, ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vsuxseg5_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_5t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13, v0.t +; CHECK-NEXT: vsuxseg5ei8.v v8, (a0), v13 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg5.mask.triscv.vector.tuple_nxv1i8_5t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 5) 
%val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) @@ -1957,10 +1949,8 @@ entry: define void @test_vsuxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vsuxseg6_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_6t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14, v0.t +; CHECK-NEXT: vsuxseg6ei8.v v8, (a0), v14 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg6.mask.triscv.vector.tuple_nxv1i8_6t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 6) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) @@ -2370,10 +2360,8 @@ entry: define void @test_vsuxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vsuxseg7_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_7t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15, v0.t +; CHECK-NEXT: vsuxseg7ei8.v v8, (a0), v15 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg7.mask.triscv.vector.tuple_nxv1i8_7t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 7) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3) @@ -2783,10 +2771,8 @@ entry: define void @test_vsuxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, i64 %vl, %mask) { ; CHECK-LABEL: test_vsuxseg8_allonesmask_nxv1i8_triscv.vector.tuple_nxv1i8_8t_nxv1i8: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma -; CHECK-NEXT: vmset.m v0 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma -; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16, v0.t +; CHECK-NEXT: vsuxseg8ei8.v v8, (a0), v16 ; CHECK-NEXT: ret entry: tail call void @llvm.riscv.vsuxseg8.mask.triscv.vector.tuple_nxv1i8_8t.nxv1i8.nxv1i1(target("riscv.vector.tuple", , 8) %val, ptr %base, %index, splat (i1 true), i64 %vl, i64 3)
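
Note: a minimal standalone illustration of the fold this series enables (not
part of the patch; the function name is hypothetical and the IR is distilled
from the vse.ll test above). When the mask operand is the compile-time
all-ones constant `splat (i1 true)`, attaching RISCVMaskedPseudo to the
masked store pseudo lets doPeepholeMaskedRVV drop the mask during ISel, so
no `vmset.m`/`v0.t` is emitted:

; Masked unit-stride store intrinsic called with an all-ones mask. After
; this patch, ISel rewrites the masked pseudo to its unmasked counterpart.
define void @store_allones_mask(<vscale x 1 x i64> %val, ptr %p, i64 %vl) {
entry:
  call void @llvm.riscv.vse.mask.nxv1i64(
      <vscale x 1 x i64> %val,
      ptr %p,
      <vscale x 1 x i1> splat (i1 true), ; all-ones mask, known statically
      i64 %vl)
  ret void
}

declare void @llvm.riscv.vse.mask.nxv1i64(<vscale x 1 x i64>, ptr, <vscale x 1 x i1>, i64)

Running this through `llc -mtriple=riscv64 -mattr=+v` should now select the
unmasked form, matching the updated CHECK lines above (a sketch, assuming
the same register assignment as in the tests):

store_allones_mask:
	vsetvli zero, a1, e64, m1, ta, ma
	vse64.v v8, (a0)
	ret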