32 changes: 16 additions & 16 deletions crates/core_arch/src/aarch64/neon/generated.rs
@@ -613,7 +613,7 @@ pub fn vaddvq_f64(a: float64x2_t) -> f64 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_s32(a: int32x2_t) -> i32 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
@@ -622,7 +622,7 @@ pub fn vaddv_s32(a: int32x2_t) -> i32 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s8(a: int8x8_t) -> i8 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
@@ -631,7 +631,7 @@ pub fn vaddv_s8(a: int8x8_t) -> i8 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s8(a: int8x16_t) -> i8 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
@@ -640,7 +640,7 @@ pub fn vaddvq_s8(a: int8x16_t) -> i8 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_s16(a: int16x4_t) -> i16 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
@@ -649,7 +649,7 @@ pub fn vaddv_s16(a: int16x4_t) -> i16 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s16(a: int16x8_t) -> i16 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
@@ -658,7 +658,7 @@ pub fn vaddvq_s16(a: int16x8_t) -> i16 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_s32(a: int32x4_t) -> i32 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
@@ -667,7 +667,7 @@ pub fn vaddvq_s32(a: int32x4_t) -> i32 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddv_u32(a: uint32x2_t) -> u32 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
@@ -676,7 +676,7 @@ pub fn vaddv_u32(a: uint32x2_t) -> u32 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u8(a: uint8x8_t) -> u8 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
@@ -685,7 +685,7 @@ pub fn vaddv_u8(a: uint8x8_t) -> u8 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
@@ -694,7 +694,7 @@ pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddv_u16(a: uint16x4_t) -> u16 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
@@ -703,7 +703,7 @@ pub fn vaddv_u16(a: uint16x4_t) -> u16 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
@@ -712,7 +712,7 @@ pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addv))]
pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
@@ -721,7 +721,7 @@ pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_s64(a: int64x2_t) -> i64 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add across vector"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
@@ -730,7 +730,7 @@ pub fn vaddvq_s64(a: int64x2_t) -> i64 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Multi-vector floating-point absolute maximum"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
@@ -15774,7 +15774,7 @@ pub fn vpadds_f32(a: float32x2_t) -> f32 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_s64(a: int64x2_t) -> i64 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
@@ -15783,7 +15783,7 @@ pub fn vpaddd_s64(a: int64x2_t) -> i64 {
#[stable(feature = "neon_intrinsics", since = "1.59.0")]
#[cfg_attr(test, assert_instr(addp))]
pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
-unsafe { simd_reduce_add_unordered(a) }
+unsafe { simd_reduce_add_ordered(a, 0) }
}
#[doc = "Floating-point add pairwise"]
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
16 changes: 8 additions & 8 deletions crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml
@@ -13261,7 +13261,7 @@ intrinsics:
types:
- [int64x2_t, i64]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vpaddd_u64"
doc: "Add pairwise"
@@ -13274,7 +13274,7 @@ intrinsics:
types:
- [uint64x2_t, u64]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vaddv{neon_type[0].no}"
doc: "Add across vector"
@@ -13291,7 +13291,7 @@ intrinsics:
- [int16x8_t, i16]
- [int32x4_t, i32]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vaddv{neon_type[0].no}"
doc: "Add across vector"
@@ -13304,7 +13304,7 @@ intrinsics:
types:
- [int32x2_t, i32]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vaddv{neon_type[0].no}"
doc: "Add across vector"
@@ -13317,7 +13317,7 @@ intrinsics:
types:
- [int64x2_t, i64]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vaddv{neon_type[0].no}"
doc: "Add across vector"
@@ -13334,7 +13334,7 @@ intrinsics:
- [uint16x8_t, u16]
- [uint32x4_t, u32]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vaddv{neon_type[0].no}"
doc: "Add across vector"
@@ -13347,7 +13347,7 @@ intrinsics:
types:
- [uint32x2_t, u32, i32]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vaddv{neon_type[0].no}"
doc: "Add across vector"
@@ -13360,7 +13360,7 @@ intrinsics:
types:
- [uint64x2_t, u64, i64]
compose:
-- FnCall: [simd_reduce_add_unordered, [a]]
+- FnCall: [simd_reduce_add_ordered, [a, 0]]

- name: "vaddlv{neon_type[0].no}"
doc: "Signed Add Long across Vector"
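
For reference, a minimal usage sketch of one of the affected intrinsics (not part of this diff; the function name sum_two_lanes and the example data are hypothetical, and the unordered and ordered-with-zero reductions produce the same result for integer lanes):

#[cfg(target_arch = "aarch64")]
fn sum_two_lanes(data: &[i32; 2]) -> i32 {
    use std::arch::aarch64::{vaddv_s32, vld1_s32};
    // SAFETY: NEON is part of the baseline AArch64 feature set, and `data`
    // points to two initialized i32 lanes.
    unsafe {
        let v = vld1_s32(data.as_ptr());
        // Add across the vector: data[0] + data[1].
        vaddv_s32(v)
    }
}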