diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs
index 44d83d0131..9dc1b049d2 100644
--- a/crates/core_arch/src/aarch64/neon/generated.rs
+++ b/crates/core_arch/src/aarch64/neon/generated.rs
@@ -4672,7 +4672,7 @@ pub unsafe fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t {
 
 /// Polynomial multiply long
 #[inline]
-#[target_feature(enable = "neon,crypto")]
+#[target_feature(enable = "neon,aes")]
 #[cfg_attr(test, assert_instr(pmull))]
 pub unsafe fn vmull_p64(a: p64, b: p64) -> p128 {
     #[allow(improper_ctypes)]
@@ -4695,7 +4695,7 @@ pub unsafe fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t {
 
 /// Polynomial multiply long
 #[inline]
-#[target_feature(enable = "neon,crypto")]
+#[target_feature(enable = "neon,aes")]
 #[cfg_attr(test, assert_instr(pmull))]
 pub unsafe fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 {
     vmull_p64(simd_extract(a, 1), simd_extract(b, 1))
diff --git a/crates/core_arch/src/arm_shared/crypto.rs b/crates/core_arch/src/arm_shared/crypto.rs
index 19d9190917..4cdebb1da4 100644
--- a/crates/core_arch/src/arm_shared/crypto.rs
+++ b/crates/core_arch/src/arm_shared/crypto.rs
@@ -51,17 +51,12 @@ extern "C" {
 #[cfg(test)]
 use stdarch_test::assert_instr;
 
-// Rust compilers without 8a57820bca64a252489790a57cb5ea23db6f9198 need crypto (hence the bootstrap check)
-// LLVM builds without b8baa2a9132498ea286dbb0d03f005760ecc6fdb need crypto for arm (hence the target_arch check)
+// TODO: Use AES for ARM when the minimum LLVM version includes b8baa2a9132498ea286dbb0d03f005760ecc6fdb
 
 /// AES single round encryption.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "aes")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(aese))]
 pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
     vaeseq_u8_(data, key)
@@ -69,12 +64,8 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
 
 /// AES single round decryption.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "aes")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(aesd))]
 pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
     vaesdq_u8_(data, key)
@@ -82,12 +73,8 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
 
 /// AES mix columns.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "aes")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(aesmc))]
 pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
     vaesmcq_u8_(data)
@@ -95,12 +82,8 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
 
 /// AES inverse mix columns.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "aes")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "aes"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(aesimc))]
 pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
     vaesimcq_u8_(data)
@@ -108,12 +91,8 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
 
 /// SHA1 fixed rotate.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha1h))]
 pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
     vsha1h_u32_(hash_e)
@@ -121,12 +100,8 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
 
 /// SHA1 hash update accelerator, choose.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha1c))]
 pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
     vsha1cq_u32_(hash_abcd, hash_e, wk)
@@ -134,12 +109,8 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 
 /// SHA1 hash update accelerator, majority.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha1m))]
 pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
     vsha1mq_u32_(hash_abcd, hash_e, wk)
@@ -147,12 +118,8 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 
 /// SHA1 hash update accelerator, parity.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha1p))]
 pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
     vsha1pq_u32_(hash_abcd, hash_e, wk)
@@ -160,12 +127,8 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
 
 /// SHA1 schedule update accelerator, first part.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha1su0))]
 pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
     vsha1su0q_u32_(w0_3, w4_7, w8_11)
@@ -173,12 +136,8 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_
 
 /// SHA1 schedule update accelerator, second part.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha1su1))]
 pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
     vsha1su1q_u32_(tw0_3, w12_15)
@@ -186,12 +145,8 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t
 
 /// SHA256 hash update accelerator.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha256h))]
 pub unsafe fn vsha256hq_u32(
     hash_abcd: uint32x4_t,
@@ -203,12 +158,8 @@ pub unsafe fn vsha256hq_u32(
 
 /// SHA256 hash update accelerator, upper part.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha256h2))]
 pub unsafe fn vsha256h2q_u32(
     hash_efgh: uint32x4_t,
@@ -220,12 +171,8 @@ pub unsafe fn vsha256h2q_u32(
 
 /// SHA256 schedule update accelerator, first part.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha256su0))]
 pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
     vsha256su0q_u32_(w0_3, w4_7)
@@ -233,12 +180,8 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t
 
 /// SHA256 schedule update accelerator, second part.
 #[inline]
-#[cfg_attr(any(bootstrap, target_arch = "arm"), target_feature(enable = "crypto"))]
-#[cfg_attr(
-    not(any(bootstrap, target_arch = "arm")),
-    target_feature(enable = "sha2")
-)]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))]
+#[cfg_attr(not(target_arch = "arm"), target_feature(enable = "sha2"))]
+#[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(test, assert_instr(sha256su1))]
 pub unsafe fn vsha256su1q_u32(
     tw0_3: uint32x4_t,
@@ -255,11 +198,8 @@ mod tests {
     use std::mem;
     use stdarch_test::simd_test;
 
-    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
-    #[cfg_attr(
-        all(not(bootstrap), target_arch = "aarch64"),
-        simd_test(enable = "aes")
-    )]
+    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
+    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
     unsafe fn test_vaeseq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
@@ -272,11 +212,8 @@ mod tests {
         );
     }
 
-    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
-    #[cfg_attr(
-        all(not(bootstrap), target_arch = "aarch64"),
-        simd_test(enable = "aes")
-    )]
+    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
+    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
     unsafe fn test_vaesdq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let key = mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
@@ -287,11 +224,8 @@ mod tests {
         );
     }
 
-    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
-    #[cfg_attr(
-        all(not(bootstrap), target_arch = "aarch64"),
-        simd_test(enable = "aes")
-    )]
+    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
+    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
     unsafe fn test_vaesmcq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
        let r: u8x16 = mem::transmute(vaesmcq_u8(data));
@@ -301,11 +235,8 @@ mod tests {
         );
     }
 
-    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
-    #[cfg_attr(
-        all(not(bootstrap), target_arch = "aarch64"),
-        simd_test(enable = "aes")
-    )]
+    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
+    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "aes"))]
     unsafe fn test_vaesimcq_u8() {
         let data = mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
         let r: u8x16 = mem::transmute(vaesimcq_u8(data));
@@ -315,15 +246,15 @@ mod tests {
         );
     }
 
-    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
-    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
"sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha1h_u32() { assert_eq!(vsha1h_u32(0x1234), 0x048d); assert_eq!(vsha1h_u32(0x5678), 0x159e); } - #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))] - #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha1su0q_u32() { let r: u32x4 = mem::transmute(vsha1su0q_u32( mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)), @@ -333,8 +264,8 @@ mod tests { assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678)); } - #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))] - #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha1su1q_u32() { let r: u32x4 = mem::transmute(vsha1su1q_u32( mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), @@ -346,8 +277,8 @@ mod tests { ); } - #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))] - #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha1cq_u32() { let r: u32x4 = mem::transmute(vsha1cq_u32( mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), @@ -360,8 +291,8 @@ mod tests { ); } - #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))] - #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha1pq_u32() { let r: u32x4 = mem::transmute(vsha1pq_u32( mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), @@ -374,8 +305,8 @@ mod tests { ); } - #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))] - #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha1mq_u32() { let r: u32x4 = mem::transmute(vsha1mq_u32( mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), @@ -388,8 +319,8 @@ mod tests { ); } - #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))] - #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha256hq_u32() { let r: u32x4 = mem::transmute(vsha256hq_u32( mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)), @@ -402,8 +333,8 @@ mod tests { ); } - #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))] - #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))] + #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))] + #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))] unsafe fn test_vsha256h2q_u32() { let r: u32x4 = mem::transmute(vsha256h2q_u32( 
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -416,8 +347,8 @@ mod tests {
         );
     }
 
-    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
-    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
+    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
+    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256su0q_u32() {
         let r: u32x4 = mem::transmute(vsha256su0q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
@@ -429,8 +360,8 @@ mod tests {
         );
     }
 
-    #[cfg_attr(any(bootstrap, target_arch = "arm"), simd_test(enable = "crypto"))]
-    #[cfg_attr(not(any(bootstrap, target_arch = "arm")), simd_test(enable = "sha2"))]
+    #[cfg_attr(target_arch = "arm", simd_test(enable = "crypto"))]
+    #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "sha2"))]
     unsafe fn test_vsha256su1q_u32() {
         let r: u32x4 = mem::transmute(vsha256su1q_u32(
             mem::transmute(u32x4::new(0x1234, 0x5678, 0x9abc, 0xdef0)),
diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs
index 296b86469a..b1ec4dbc5a 100644
--- a/crates/core_arch/src/arm_shared/neon/generated.rs
+++ b/crates/core_arch/src/arm_shared/neon/generated.rs
@@ -2086,7 +2086,7 @@ pub unsafe fn vcreate_p16(a: u64) -> poly16x4_t {
 
 /// Insert vector element from another vector element
 #[inline]
-#[target_feature(enable = "neon,crypto")]
+#[target_feature(enable = "neon,aes")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop))]
@@ -13212,7 +13212,7 @@ pub unsafe fn vset_lane_p16<const LANE: i32>(a: p16, b: poly16x4_t) -> poly16x4_
 
 /// Insert vector element from another vector element
 #[inline]
-#[target_feature(enable = "neon,crypto")]
+#[target_feature(enable = "neon,aes")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
@@ -13344,7 +13344,7 @@ pub unsafe fn vsetq_lane_p16<const LANE: i32>(a: p16, b: poly16x8_t) -> poly16x8
 
 /// Insert vector element from another vector element
 #[inline]
-#[target_feature(enable = "neon,crypto")]
+#[target_feature(enable = "neon,aes")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "crypto,v8"))]
 #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))]
 #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(nop, LANE = 0))]
diff --git a/crates/stdarch-gen/neon.spec b/crates/stdarch-gen/neon.spec
index 159419285f..7d4d878284 100644
--- a/crates/stdarch-gen/neon.spec
+++ b/crates/stdarch-gen/neon.spec
@@ -851,7 +851,7 @@ arm = nop
 generate u64:int8x8_t, u64:int16x4_t, u64:int32x2_t, u64:int64x1_t
 generate u64:uint8x8_t, u64:uint16x4_t, u64:uint32x2_t, u64:uint64x1_t
 generate u64:poly8x8_t, u64:poly16x4_t
-target = crypto
+target = aes
 generate u64:poly64x1_t
 
 /// Insert vector element from another vector element
@@ -2238,7 +2238,7 @@ no-q
 a = 15
 b = 3
 validate 17
-target = crypto
+target = aes
 
 aarch64 = pmull
 link-aarch64 = pmull64:p64:p64:p64:int8x16_t
@@ -2269,7 +2269,7 @@ multi_fn = vmull-noqself-noext, {simd_extract, a, 1}, {simd_extract, b, 1}
 a = 1, 15
 b = 1, 3
 validate 17
-target = crypto
+target = aes
 
 aarch64 = pmull
 generate poly64x2_t:poly64x2_t:p128
@@ -4582,7 +4582,7 @@
 generate u8:uint8x8_t:uint8x8_t, u16:uint16x4_t:uint16x4_t
 generate u32:uint32x2_t:uint32x2_t, u64:uint64x1_t:uint64x1_t
 generate p8:poly8x8_t:poly8x8_t, p16:poly16x4_t:poly16x4_t
-target = crypto
+target = aes
 generate p64:poly64x1_t:poly64x1_t
 
 /// Insert vector element from another vector element
@@ -4604,7 +4604,7 @@
 generate u8:uint8x16_t:uint8x16_t, u16:uint16x8_t:uint16x8_t
 generate u32:uint32x4_t:uint32x4_t, u64:uint64x2_t:uint64x2_t
 generate p8:poly8x16_t:poly8x16_t, p16:poly16x8_t:poly16x8_t
-target = crypto
+target = aes
 generate p64:poly64x2_t:poly64x2_t
 
 /// Insert vector element from another vector element
diff --git a/crates/stdarch-gen/src/main.rs b/crates/stdarch-gen/src/main.rs
index 03e6f409ab..7f506acb77 100644
--- a/crates/stdarch-gen/src/main.rs
+++ b/crates/stdarch-gen/src/main.rs
@@ -379,7 +379,7 @@ enum TargetFeature {
     Default,
     ArmV7,
     FPArmV8,
-    Crypto,
+    AES,
 }
 
 fn type_to_global_type(t: &str) -> &str {
@@ -891,7 +891,7 @@ fn gen_aarch64(
         Default => "neon",
         ArmV7 => "v7",
         FPArmV8 => "fp-armv8,v8",
-        Crypto => "neon,crypto",
+        AES => "neon,aes",
     };
     let current_fn = if let Some(current_fn) = current_fn.clone() {
         if link_aarch64.is_some() {
@@ -1341,13 +1341,13 @@ fn gen_arm(
         Default => "neon",
         ArmV7 => "neon",
         FPArmV8 => "neon",
-        Crypto => "neon,crypto",
+        AES => "neon,aes",
     };
     let current_target_arm = match target {
         Default => "v7",
         ArmV7 => "v7",
         FPArmV8 => "fp-armv8,v8",
-        Crypto => "crypto,v8",
+        AES => "crypto,v8", // TODO: Replace with AES when the minimum LLVM version has b8baa2a9132498ea286dbb0d03f005760ecc6fdb
     };
 
     let current_fn = if let Some(current_fn) = current_fn.clone() {
@@ -2552,7 +2552,7 @@ mod test {
         Some(input) => match input.as_str() {
             "v7" => ArmV7,
             "fp-armv8" => FPArmV8,
-            "crypto" => Crypto,
+            "aes" => AES,
             _ => Default,
         },
         _ => Default,
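
For downstream callers, the practical effect of this patch is that the AArch64 crypto intrinsics are gated on the fine-grained `aes` and `sha2` target features instead of the `crypto` umbrella feature, while 32-bit ARM keeps `crypto` until the minimum LLVM version includes b8baa2a9132498ea286dbb0d03f005760ecc6fdb. Below is a minimal caller-side sketch of the new gate, assuming a toolchain where `core::arch::aarch64::vmull_p64` and `std::arch::is_aarch64_feature_detected!` are available; the `clmul` wrapper and its software fallback are illustrative and not part of this patch:

```rust
// Hypothetical caller-side sketch (not part of this patch): gate the
// polynomial multiply on the fine-grained `aes` feature at runtime.
#[cfg(target_arch = "aarch64")]
fn clmul(a: u64, b: u64) -> u128 {
    if std::arch::is_aarch64_feature_detected!("aes") {
        // SAFETY: the `aes` target feature was verified at runtime above.
        unsafe { core::arch::aarch64::vmull_p64(a, b) }
    } else {
        // Portable carry-less multiply fallback (schoolbook shift-and-xor).
        let mut acc: u128 = 0;
        for i in 0..64 {
            if (b >> i) & 1 == 1 {
                acc ^= (a as u128) << i;
            }
        }
        acc
    }
}
```

Builds that enable the feature globally (e.g. `-C target-feature=+aes` on AArch64) can skip the runtime check; on 32-bit ARM the equivalent spelling remains `+crypto` for now, per the TODOs above.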