@@ -613,7 +613,7 @@ pub fn vaddvq_f64(a: float64x2_t) -> f64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddv_s32(a: int32x2_t) -> i32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"]
@@ -622,7 +622,7 @@ pub fn vaddv_s32(a: int32x2_t) -> i32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_s8(a: int8x8_t) -> i8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"]
@@ -631,7 +631,7 @@ pub fn vaddv_s8(a: int8x8_t) -> i8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_s8(a: int8x16_t) -> i8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"]
@@ -640,7 +640,7 @@ pub fn vaddvq_s8(a: int8x16_t) -> i8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_s16(a: int16x4_t) -> i16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"]
@@ -649,7 +649,7 @@ pub fn vaddv_s16(a: int16x4_t) -> i16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_s16(a: int16x8_t) -> i16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"]
@@ -658,7 +658,7 @@ pub fn vaddvq_s16(a: int16x8_t) -> i16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_s32(a: int32x4_t) -> i32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"]
@@ -667,7 +667,7 @@ pub fn vaddvq_s32(a: int32x4_t) -> i32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddv_u32(a: uint32x2_t) -> u32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"]
@@ -676,7 +676,7 @@ pub fn vaddv_u32(a: uint32x2_t) -> u32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_u8(a: uint8x8_t) -> u8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"]
@@ -685,7 +685,7 @@ pub fn vaddv_u8(a: uint8x8_t) -> u8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"]
@@ -694,7 +694,7 @@ pub fn vaddvq_u8(a: uint8x16_t) -> u8 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddv_u16(a: uint16x4_t) -> u16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"]
@@ -703,7 +703,7 @@ pub fn vaddv_u16(a: uint16x4_t) -> u16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"]
@@ -712,7 +712,7 @@ pub fn vaddvq_u16(a: uint16x8_t) -> u16 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addv))]
 pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"]
@@ -721,7 +721,7 @@ pub fn vaddvq_u32(a: uint32x4_t) -> u32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddvq_s64(a: int64x2_t) -> i64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add across vector"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"]
@@ -730,7 +730,7 @@ pub fn vaddvq_s64(a: int64x2_t) -> i64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vaddvq_u64(a: uint64x2_t) -> u64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Multi-vector floating-point absolute maximum"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"]
@@ -15774,7 +15774,7 @@ pub fn vpadds_f32(a: float32x2_t) -> f32 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vpaddd_s64(a: int64x2_t) -> i64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"]
@@ -15783,7 +15783,7 @@ pub fn vpaddd_s64(a: int64x2_t) -> i64 {
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 #[cfg_attr(test, assert_instr(addp))]
 pub fn vpaddd_u64(a: uint64x2_t) -> u64 {
-    unsafe { simd_reduce_add_unordered(a) }
+    unsafe { simd_reduce_add_ordered(a, 0) }
 }
 #[doc = "Floating-point add pairwise"]
 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"]
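
A note for context (an editorial addition, not part of the commit): `simd_reduce_add_ordered(a, 0)` reduces the lanes left to right starting from the accumulator value `0`, while `simd_reduce_add_unordered` lets the backend choose the association order. For the integer vectors changed here the two agree, since wrapping integer addition is associative and commutative, so the observable results of these intrinsics are unchanged. A minimal usage sketch for one of the affected intrinsics, assuming an aarch64 target (the function layout is illustrative only):

```rust
#[cfg(target_arch = "aarch64")]
fn main() {
    use core::arch::aarch64::{vaddv_s32, vld1_s32};
    let data: [i32; 2] = [3, 4];
    // vld1_s32 loads two i32 lanes from the pointer; the load is unsafe,
    // and on older stdarch versions vaddv_s32 itself is an `unsafe fn`,
    // so both calls sit in one unsafe block.
    let sum = unsafe { vaddv_s32(vld1_s32(data.as_ptr())) };
    assert_eq!(sum, 7); // 3 + 4, independent of reduction order
}

#[cfg(not(target_arch = "aarch64"))]
fn main() {}
```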