1 change: 1 addition & 0 deletions src/coreclr/jit/hwintrinsic.h
@@ -57,6 +57,7 @@ enum HWIntrinsicCategory : uint8_t
// These are Arm64 that share some features in a given category (e.g. immediate operand value range)
HW_Category_ShiftLeftByImmediate,
HW_Category_ShiftRightByImmediate,
HW_Category_RotateByImmediate,
Contributor:
Why can't we just use HW_Category_ShiftRightByImmediate?

Contributor (Author):
I was trying to differentiate the two types of modifier from the start, just in case they do need to be handled differently in future. I'm not sure they ever will, though; the two modifiers are very similar.
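As a side note, a minimal scalar sketch (hypothetical helpers, byte elements only) of how close the two forms are: both take a count in the range 1 up to the element size in bits, and only the operation itself differs.

```csharp
using System;

class ShiftVsRotateSketch
{
    // Hypothetical scalar helpers for a single byte element; both accept a
    // count in the range 1..8, mirroring the shared immediate constraint.
    static byte ShiftRight(byte value, int count) => (byte)(value >> count);

    static byte RotateRight(byte value, int count) =>
        (byte)((value >> count) | (value << (8 - count)));

    static void Main()
    {
        byte v = 0b1000_0001;
        Console.WriteLine(Convert.ToString(ShiftRight(v, 1), 2).PadLeft(8, '0'));  // 01000000
        Console.WriteLine(Convert.ToString(RotateRight(v, 1), 2).PadLeft(8, '0')); // 11000000
    }
}
```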

Contributor:
> in future.

Let's introduce the new category when there is a need. For now, just reuse HW_Category_ShiftRightByImmediate.

HW_Category_SIMDByIndexedElement,

// Helper intrinsics
5 changes: 5 additions & 0 deletions src/coreclr/jit/hwintrinsicarm64.cpp
@@ -552,6 +552,11 @@ void HWIntrinsicInfo::lookupImmBounds(
immUpperBound = 7;
break;

case NI_Sve2_XorRotateRight:
immLowerBound = 1;
immUpperBound = genTypeSize(baseType) * BITS_PER_BYTE;
break;
Contributor:

This is not needed. It will be handled by the else if (category == HW_Category_ShiftRightByImmediate) logic above.


default:
unreached();
}
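As a side note on the bounds in this hunk: for XAR the rotate amount is 1 up to the element size in bits, which is what genTypeSize(baseType) * BITS_PER_BYTE computes (the same range the existing shift-right-by-immediate path produces). A hedged C# sketch, assuming the managed XorRotateRight surface added in this PR and a caller that has already checked Sve2.IsSupported:

```csharp
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class XarBoundsSketch
{
    // byte lanes: valid rotate counts are 1..8
    public static Vector<byte> RotateByteLanes(Vector<byte> a, Vector<byte> b)
        => Sve2.XorRotateRight(a, b, 8);

    // ulong lanes: valid rotate counts are 1..64
    public static Vector<ulong> RotateUlongLanes(Vector<ulong> a, Vector<ulong> b)
        => Sve2.XorRotateRight(a, b, 64);

    // A count of 0, or one above the element width, falls outside the bounds
    // above and is expected to be rejected with an out-of-range exception.
}
```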
4 changes: 3 additions & 1 deletion src/coreclr/jit/hwintrinsiccodegenarm64.cpp
@@ -424,7 +424,8 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
}
}
else if ((intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate))
(intrin.category == HW_Category_ShiftRightByImmediate) ||
(intrin.category == HW_Category_RotateByImmediate))
{
assert(hasImmediateOperand);

@@ -2659,6 +2660,7 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
}

case NI_Sve2_BitwiseClearXor:
case NI_Sve2_Xor:
if (targetReg != op1Reg)
{
assert(targetReg != op2Reg);
4 changes: 3 additions & 1 deletion src/coreclr/jit/hwintrinsiclistarm64sve.h
@@ -311,7 +311,9 @@ HARDWARE_INTRINSIC(Sve, ZipLow,
#define FIRST_NI_Sve2 NI_Sve2_BitwiseClearXor
HARDWARE_INTRINSIC(Sve2, BitwiseClearXor, -1, 3, {INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_HasRMWSemantics)
HARDWARE_INTRINSIC(Sve2, ShiftLeftAndInsert, -1, 3, {INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_invalid, INS_invalid}, HW_Category_ShiftLeftByImmediate, HW_Flag_Scalable|HW_Flag_HasImmediateOperand|HW_Flag_HasRMWSemantics)
#define LAST_NI_Sve2 NI_Sve2_ShiftLeftAndInsert
HARDWARE_INTRINSIC(Sve2, Xor, -1, 3, {INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_HasRMWSemantics)
Contributor:

Remind me: why do we need HW_Flag_SpecialCodeGen for this one? Did you try without the flag, and what problems did we run into?

Contributor (Author):

It's because the instruction only accepts a D lane arrangement, but the intrinsic attempts to pass B/H/S based on the BaseJitType. As the operation is bitwise, it's OK to override it.

Maybe this could be generalized later with more flags to deal with a fixed lane arrangement, or the emitter could just be modified to be smarter about the options passed in. We'd need more space for flags if we wanted to go down the first route. For now I've just used SpecialCodeGen.
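For readers following along, a minimal scalar illustration of why forcing the D (64-bit) arrangement is harmless here: XOR over the same bits yields the same bit pattern whether they are viewed as eight bytes or one ulong, so the element type carried by the intrinsic doesn't change the result.

```csharp
using System;
using System.Buffers.Binary;

class LaneWidthSketch
{
    static void Main()
    {
        byte[] a = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 };
        byte[] b = { 0xF0, 0x0F, 0xAA, 0x55, 0x12, 0x34, 0x56, 0x78 };

        // XOR per byte ("B lanes")
        byte[] perByte = new byte[8];
        for (int i = 0; i < 8; i++)
            perByte[i] = (byte)(a[i] ^ b[i]);

        // XOR as a single 64-bit value ("D lanes")
        ulong x = BinaryPrimitives.ReadUInt64LittleEndian(a) ^
                  BinaryPrimitives.ReadUInt64LittleEndian(b);
        byte[] perUlong = new byte[8];
        BinaryPrimitives.WriteUInt64LittleEndian(perUlong, x);

        // Identical bit pattern either way.
        Console.WriteLine(perByte.AsSpan().SequenceEqual(perUlong)); // True
    }
}
```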

HARDWARE_INTRINSIC(Sve2, XorRotateRight, -1, 3, {INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_invalid, INS_invalid}, HW_Category_RotateByImmediate, HW_Flag_Scalable|HW_Flag_HasRMWSemantics|HW_Flag_HasImmediateOperand)
#define LAST_NI_Sve2 NI_Sve2_XorRotateRight

// ***************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
// ISA Function name SIMD size NumArg Instructions Category Flags
3 changes: 2 additions & 1 deletion src/coreclr/jit/lowerarmarch.cpp
@@ -3758,7 +3758,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
const bool hasImmediateOperand = HWIntrinsicInfo::HasImmediateOperand(intrin.id);

if ((intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate) || (intrin.category == HW_Category_RotateByImmediate) ||
((intrin.category == HW_Category_SIMDByIndexedElement) && hasImmediateOperand))
{
switch (intrin.numOperands)
@@ -3828,6 +3828,7 @@ void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
case NI_Sve_AddRotateComplex:
case NI_Sve_TrigonometricMultiplyAddCoefficient:
case NI_Sve2_ShiftLeftAndInsert:
case NI_Sve2_XorRotateRight:
Contributor:

I don't think we need this either. It should get handled by the intrin.category == HW_Category_ShiftRightByImmediate condition above.

assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op3));
if (intrin.op3->IsCnsIntOrI())
3 changes: 2 additions & 1 deletion src/coreclr/jit/lsraarm64.cpp
@@ -1611,7 +1611,8 @@ void LinearScan::BuildHWIntrinsicImmediate(GenTreeHWIntrinsic* intrinsicTree, co
{
if ((intrin.category == HW_Category_SIMDByIndexedElement) ||
(intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate))
(intrin.category == HW_Category_ShiftRightByImmediate) ||
(intrin.category == HW_Category_RotateByImmediate))
{
switch (intrin.numOperands)
{
1 change: 1 addition & 0 deletions src/coreclr/jit/optcse.cpp
@@ -1980,6 +1980,7 @@ bool CSE_HeuristicCommon::CanConsiderTree(GenTree* tree, bool isReturn)
case HW_Category_SIMDByIndexedElement:
case HW_Category_ShiftLeftByImmediate:
case HW_Category_ShiftRightByImmediate:
case HW_Category_RotateByImmediate:
case HW_Category_Scalar:
case HW_Category_Helper:
break;
@@ -127,5 +127,107 @@ internal Arm64() { }
/// SLI Ztied1.D, Zop2.D, #imm3
/// </summary>
public static Vector<ulong> ShiftLeftAndInsert(Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); }


// Bitwise exclusive OR of three vectors

/// <summary>
/// svuint8_t sveor3[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<byte> Xor(Vector<byte> value1, Vector<byte> value2, Vector<byte> value3) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint16_t sveor3[_s16](svint16_t op1, svint16_t op2, svint16_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<short> Xor(Vector<short> value1, Vector<short> value2, Vector<short> value3) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint32_t sveor3[_s32](svint32_t op1, svint32_t op2, svint32_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<int> Xor(Vector<int> value1, Vector<int> value2, Vector<int> value3) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint64_t sveor3[_s64](svint64_t op1, svint64_t op2, svint64_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<long> Xor(Vector<long> value1, Vector<long> value2, Vector<long> value3) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint8_t sveor3[_s8](svint8_t op1, svint8_t op2, svint8_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<sbyte> Xor(Vector<sbyte> value1, Vector<sbyte> value2, Vector<sbyte> value3) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svuint16_t sveor3[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<ushort> Xor(Vector<ushort> value1, Vector<ushort> value2, Vector<ushort> value3) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svuint32_t sveor3[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<uint> Xor(Vector<uint> value1, Vector<uint> value2, Vector<uint> value3) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svuint64_t sveor3[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<ulong> Xor(Vector<ulong> value1, Vector<ulong> value2, Vector<ulong> value3) { throw new PlatformNotSupportedException(); }


// Bitwise exclusive OR and rotate right

/// <summary>
/// svuint8_t svxar[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3)
/// XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3
/// </summary>
public static Vector<byte> XorRotateRight(Vector<byte> left, Vector<byte> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint16_t svxar[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3)
/// XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3
/// </summary>
public static Vector<short> XorRotateRight(Vector<short> left, Vector<short> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint32_t svxar[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3)
/// XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3
/// </summary>
public static Vector<int> XorRotateRight(Vector<int> left, Vector<int> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint64_t svxar[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3)
/// XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3
/// </summary>
public static Vector<long> XorRotateRight(Vector<long> left, Vector<long> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svint8_t svxar[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3)
/// XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3
/// </summary>
public static Vector<sbyte> XorRotateRight(Vector<sbyte> left, Vector<sbyte> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svuint16_t svxar[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3)
/// XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3
/// </summary>
public static Vector<ushort> XorRotateRight(Vector<ushort> left, Vector<ushort> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svuint32_t svxar[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3)
/// XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3
/// </summary>
public static Vector<uint> XorRotateRight(Vector<uint> left, Vector<uint> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }

/// <summary>
/// svuint64_t svxar[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3)
/// XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3
/// </summary>
public static Vector<ulong> XorRotateRight(Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }
}
}
@@ -127,5 +127,107 @@ internal Arm64() { }
/// SLI Ztied1.D, Zop2.D, #imm3
/// </summary>
public static Vector<ulong> ShiftLeftAndInsert(Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);


// Bitwise exclusive OR of three vectors

/// <summary>
/// svuint8_t sveor3[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<byte> Xor(Vector<byte> value1, Vector<byte> value2, Vector<byte> value3) => Xor(value1, value2, value3);

/// <summary>
/// svint16_t sveor3[_s16](svint16_t op1, svint16_t op2, svint16_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<short> Xor(Vector<short> value1, Vector<short> value2, Vector<short> value3) => Xor(value1, value2, value3);

/// <summary>
/// svint32_t sveor3[_s32](svint32_t op1, svint32_t op2, svint32_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<int> Xor(Vector<int> value1, Vector<int> value2, Vector<int> value3) => Xor(value1, value2, value3);

/// <summary>
/// svint64_t sveor3[_s64](svint64_t op1, svint64_t op2, svint64_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<long> Xor(Vector<long> value1, Vector<long> value2, Vector<long> value3) => Xor(value1, value2, value3);

/// <summary>
/// svint8_t sveor3[_s8](svint8_t op1, svint8_t op2, svint8_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<sbyte> Xor(Vector<sbyte> value1, Vector<sbyte> value2, Vector<sbyte> value3) => Xor(value1, value2, value3);

/// <summary>
/// svuint16_t sveor3[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<ushort> Xor(Vector<ushort> value1, Vector<ushort> value2, Vector<ushort> value3) => Xor(value1, value2, value3);

/// <summary>
/// svuint32_t sveor3[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<uint> Xor(Vector<uint> value1, Vector<uint> value2, Vector<uint> value3) => Xor(value1, value2, value3);

/// <summary>
/// svuint64_t sveor3[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3)
/// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D
/// </summary>
public static Vector<ulong> Xor(Vector<ulong> value1, Vector<ulong> value2, Vector<ulong> value3) => Xor(value1, value2, value3);


// Bitwise exclusive OR and rotate right

/// <summary>
/// svuint8_t svxar[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3)
/// XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3
/// </summary>
public static Vector<byte> XorRotateRight(Vector<byte> left, Vector<byte> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);

/// <summary>
/// svint16_t svxar[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3)
/// XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3
/// </summary>
public static Vector<short> XorRotateRight(Vector<short> left, Vector<short> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);

/// <summary>
/// svint32_t svxar[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3)
/// XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3
/// </summary>
public static Vector<int> XorRotateRight(Vector<int> left, Vector<int> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);

/// <summary>
/// svint64_t svxar[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3)
/// XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3
/// </summary>
public static Vector<long> XorRotateRight(Vector<long> left, Vector<long> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);

/// <summary>
/// svint8_t svxar[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3)
/// XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3
/// </summary>
public static Vector<sbyte> XorRotateRight(Vector<sbyte> left, Vector<sbyte> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);

/// <summary>
/// svuint16_t svxar[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3)
/// XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3
/// </summary>
public static Vector<ushort> XorRotateRight(Vector<ushort> left, Vector<ushort> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);

/// <summary>
/// svuint32_t svxar[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3)
/// XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3
/// </summary>
public static Vector<uint> XorRotateRight(Vector<uint> left, Vector<uint> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);

/// <summary>
/// svuint64_t svxar[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3)
/// XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3
/// </summary>
public static Vector<ulong> XorRotateRight(Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte count) => XorRotateRight(left, right, count);
}
}
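Closing note: a scalar C# model of the semantics exposed by the new surface, for reference only. EOR3 is a plain three-way XOR per element, and XAR XORs two elements and rotates the result right by the constant count. Shown for a single ulong lane; this is a sketch, not the implementation.

```csharp
using System;
using System.Numerics;

static class Sve2ScalarModel
{
    // Per-lane model of EOR3 (Sve2.Xor): three-way bitwise XOR.
    public static ulong Xor(ulong a, ulong b, ulong c) => a ^ b ^ c;

    // Per-lane model of XAR (Sve2.XorRotateRight): XOR, then rotate right.
    public static ulong XorRotateRight(ulong left, ulong right, int count)
        => BitOperations.RotateRight(left ^ right, count);

    static void Main()
    {
        Console.WriteLine($"{Xor(0x0Ful, 0xF0ul, 0xFFul):X}");       // 0
        Console.WriteLine($"{XorRotateRight(0x1ul, 0x0ul, 1):X16}"); // 8000000000000000
    }
}
```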