From ca6a85155b35cdb2139ff7fef69867724c022171 Mon Sep 17 00:00:00 2001 From: David Wood Date: Fri, 10 Apr 2026 05:33:13 +0000 Subject: [PATCH 1/2] cg_llvm: replace `sve_cast` with `simd_cast` Previously `sve_cast`'s implementation was abstracted to power both `sve_cast` and `simd_cast` which supported scalable and non-scalable vectors respectively. In anticipation of having to do this for another `simd_*` intrinsic, `sve_cast` is removed and `simd_cast` is changed to accept both scalable and non-scalable vectors, an approach that will scale better to the other intrinsics. --- compiler/rustc_abi/src/lib.rs | 20 ++ compiler/rustc_codegen_llvm/src/intrinsic.rs | 234 ++++++++++--------- compiler/rustc_codegen_ssa/src/errors.rs | 12 + library/core/src/intrinsics/simd/scalable.rs | 21 -- tests/ui/scalable-vectors/cast-intrinsic.rs | 4 +- 5 files changed, 158 insertions(+), 133 deletions(-) diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index ec6eb7e7dc106..450a93ee8481e 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -47,6 +47,8 @@ use bitflags::bitflags; #[cfg(feature = "nightly")] use rustc_data_structures::stable_hasher::StableOrd; #[cfg(feature = "nightly")] +use rustc_error_messages::{DiagArgValue, IntoDiagArg}; +#[cfg(feature = "nightly")] use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, msg}; use rustc_hashes::Hash64; use rustc_index::{Idx, IndexSlice, IndexVec}; @@ -1775,6 +1777,24 @@ impl NumScalableVectors { } } +#[cfg(feature = "nightly")] +impl IntoDiagArg for NumScalableVectors { + fn into_diag_arg(self, _: &mut Option) -> DiagArgValue { + DiagArgValue::Str(std::borrow::Cow::Borrowed(match self.0 { + 0 => panic!("`NumScalableVectors(0)` is ill-formed"), + 1 => "one", + 2 => "two", + 3 => "three", + 4 => "four", + 5 => "five", + 6 => "six", + 7 => "seven", + 8 => "eight", + _ => panic!("`NumScalableVectors(N)` for N>8 is ill-formed"), + })) + } +} + /// The way we
represent values to the backend /// /// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI. diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 3e600914d6f42..49c72aee7e00d 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -606,27 +606,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.pointercast(val, self.type_ptr()) } - sym::sve_cast => { - let Some((in_cnt, in_elem, in_num_vecs)) = - args[0].layout.ty.scalable_vector_parts(self.cx.tcx) - else { - bug!("input parameter to `sve_cast` was not scalable vector"); - }; - let out_layout = self.layout_of(fn_args.type_at(1)); - let Some((out_cnt, out_elem, out_num_vecs)) = - out_layout.ty.scalable_vector_parts(self.cx.tcx) - else { - bug!("output parameter to `sve_cast` was not scalable vector"); - }; - assert_eq!(in_cnt, out_cnt); - assert_eq!(in_num_vecs, out_num_vecs); - let out_llty = self.backend_type(out_layout); - match simd_cast(self, sym::simd_cast, args, out_llty, in_elem, out_elem) { - Some(val) => val, - _ => bug!("could not cast scalable vectors"), - } - } - sym::sve_tuple_create2 => { assert_matches!( self.layout_of(fn_args.type_at(0)).backend_repr, @@ -1668,6 +1647,23 @@ fn generic_simd_intrinsic<'ll, 'tcx>( }}; } + macro_rules! require_simd_or_scalable { + ($ty: expr, $variant:ident) => {{ + require!( + $ty.is_simd() || $ty.is_scalable_vector(), + InvalidMonomorphization::$variant { span, name, ty: $ty } + ); + if $ty.is_simd() { + let (len, ty) = $ty.simd_size_and_type(bx.tcx()); + (len, ty, None) + } else { + let (count, ty, num_vecs) = + $ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong"); + (count as u64, ty, Some(num_vecs)) + } + }}; + } + /// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type. macro_rules! 
require_int_or_uint_ty { ($ty: expr, $diag: expr) => { @@ -1787,8 +1783,19 @@ fn generic_simd_intrinsic<'ll, 'tcx>( return Ok(splat); } - // every intrinsic below takes a SIMD vector as its first argument - let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput); + let supports_scalable = match name { + sym::simd_cast => true, + _ => false, + }; + + // Every intrinsic below takes a SIMD vector as its first argument. Some intrinsics also accept + // scalable vectors. `require_simd_or_scalable` is used regardless as it'll do the right thing + // for non-scalable vectors, and an additional check to prohibit scalable vectors for those + // intrinsics that do not support them is added. + if !supports_scalable { + let _ = require_simd!(args[0].layout.ty, SimdInput); + } + let (in_len, in_elem, in_num_vecs) = require_simd_or_scalable!(args[0].layout.ty, SimdInput); let in_ty = args[0].layout.ty; let comparison = match name { @@ -2781,7 +2788,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } if name == sym::simd_cast || name == sym::simd_as { - let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn); + let (out_len, out_elem, out_num_vecs) = require_simd_or_scalable!(ret_ty, SimdReturn); require!( in_len == out_len, InvalidMonomorphization::ReturnLengthInputType { @@ -2793,9 +2800,99 @@ fn generic_simd_intrinsic<'ll, 'tcx>( out_len } ); - match simd_cast(bx, name, args, llret_ty, in_elem, out_elem) { - Some(val) => return Ok(val), - None => return_error!(InvalidMonomorphization::UnsupportedCast { + require!( + in_num_vecs == out_num_vecs, + InvalidMonomorphization::ReturnNumVecsInputType { + span, + name, + in_num_vecs: in_num_vecs.unwrap_or(NumScalableVectors(1)), + in_ty, + ret_ty, + out_num_vecs: out_num_vecs.unwrap_or(NumScalableVectors(1)) + } + ); + + // Casting cares about nominal type, not just structural type + if in_elem == out_elem { + return Ok(args[0].immediate()); + } + + #[derive(Copy, Clone)] + enum Sign { + Unsigned, + Signed, + } + use Sign::*; 
+ + enum Style { + Float, + Int(Sign), + Unsupported, + } + + let (in_style, in_width) = match in_elem.kind() { + // vectors of pointer-sized integers should've been + // disallowed before here, so this unwrap is safe. + ty::Int(i) => ( + Style::Int(Signed), + i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Uint(u) => ( + Style::Int(Unsigned), + u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Float(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0), + }; + let (out_style, out_width) = match out_elem.kind() { + ty::Int(i) => ( + Style::Int(Signed), + i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Uint(u) => ( + Style::Int(Unsigned), + u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), + ), + ty::Float(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0), + }; + + match (in_style, out_style) { + (Style::Int(sign), Style::Int(_)) => { + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => match sign { + Sign::Signed => bx.sext(args[0].immediate(), llret_ty), + Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty), + }, + }); + } + (Style::Int(Sign::Signed), Style::Float) => { + return Ok(bx.sitofp(args[0].immediate(), llret_ty)); + } + (Style::Int(Sign::Unsigned), Style::Float) => { + return Ok(bx.uitofp(args[0].immediate(), llret_ty)); + } + (Style::Float, Style::Int(sign)) => { + return Ok(match (sign, name == sym::simd_as) { + (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty), + (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty), + (_, true) => bx.cast_float_to_int( + matches!(sign, Sign::Signed), + args[0].immediate(), + llret_ty, + ), + }); + } + (Style::Float, Style::Float) => { + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => 
bx.fptrunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => bx.fpext(args[0].immediate(), llret_ty), + }); + } + _ => return_error!(InvalidMonomorphization::UnsupportedCast { span, name, in_ty, @@ -2977,86 +3074,3 @@ fn generic_simd_intrinsic<'ll, 'tcx>( span_bug!(span, "unknown SIMD intrinsic"); } - -/// Implementation of `core::intrinsics::simd_cast`, re-used by `core::scalable::sve_cast`. -fn simd_cast<'ll, 'tcx>( - bx: &mut Builder<'_, 'll, 'tcx>, - name: Symbol, - args: &[OperandRef<'tcx, &'ll Value>], - llret_ty: &'ll Type, - in_elem: Ty<'tcx>, - out_elem: Ty<'tcx>, -) -> Option<&'ll Value> { - // Casting cares about nominal type, not just structural type - if in_elem == out_elem { - return Some(args[0].immediate()); - } - - #[derive(Copy, Clone)] - enum Sign { - Unsigned, - Signed, - } - use Sign::*; - - enum Style { - Float, - Int(Sign), - Unsupported, - } - - let (in_style, in_width) = match in_elem.kind() { - // vectors of pointer-sized integers should've been - // disallowed before here, so this unwrap is safe. 
- ty::Int(i) => ( - Style::Int(Signed), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(Unsigned), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - let (out_style, out_width) = match out_elem.kind() { - ty::Int(i) => ( - Style::Int(Signed), - i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Uint(u) => ( - Style::Int(Unsigned), - u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(), - ), - ty::Float(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0), - }; - - match (in_style, out_style) { - (Style::Int(sign), Style::Int(_)) => Some(match in_width.cmp(&out_width) { - Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => match sign { - Sign::Signed => bx.sext(args[0].immediate(), llret_ty), - Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty), - }, - }), - (Style::Int(Sign::Signed), Style::Float) => Some(bx.sitofp(args[0].immediate(), llret_ty)), - (Style::Int(Sign::Unsigned), Style::Float) => { - Some(bx.uitofp(args[0].immediate(), llret_ty)) - } - (Style::Float, Style::Int(sign)) => Some(match (sign, name == sym::simd_as) { - (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty), - (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty), - (_, true) => { - bx.cast_float_to_int(matches!(sign, Sign::Signed), args[0].immediate(), llret_ty) - } - }), - (Style::Float, Style::Float) => Some(match in_width.cmp(&out_width) { - Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty), - Ordering::Equal => args[0].immediate(), - Ordering::Less => bx.fpext(args[0].immediate(), llret_ty), - }), - _ => None, - } -} diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index 
cec84f60a7b0e..8a97521feb436 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -6,6 +6,7 @@ use std::io::Error; use std::path::{Path, PathBuf}; use std::process::ExitStatus; +use rustc_abi::NumScalableVectors; use rustc_errors::codes::*; use rustc_errors::{ Diag, DiagArgValue, DiagCtxtHandle, DiagSymbolList, Diagnostic, EmissionGuarantee, IntoDiagArg, @@ -809,6 +810,17 @@ pub enum InvalidMonomorphization<'tcx> { out_len: u64, }, + #[diag("invalid monomorphization of `{$name}` intrinsic: expected return type with {$in_num_vecs} vectors (same as input type `{$in_ty}`), found `{$ret_ty}` with {$out_num_vecs} vectors", code = E0511)] + ReturnNumVecsInputType { + #[primary_span] + span: Span, + name: Symbol, + in_num_vecs: NumScalableVectors, + in_ty: Ty<'tcx>, + ret_ty: Ty<'tcx>, + out_num_vecs: NumScalableVectors, + }, + #[diag("invalid monomorphization of `{$name}` intrinsic: expected second argument with length {$in_len} (same as input type `{$in_ty}`), found `{$arg_ty}` with length {$out_len}", code = E0511)] SecondArgumentLength { #[primary_span] diff --git a/library/core/src/intrinsics/simd/scalable.rs b/library/core/src/intrinsics/simd/scalable.rs index b2b0fec487c08..a8984b3a2f7db 100644 --- a/library/core/src/intrinsics/simd/scalable.rs +++ b/library/core/src/intrinsics/simd/scalable.rs @@ -2,27 +2,6 @@ //! //! In this module, a "vector" is any `#[rustc_scalable_vector]`-annotated type. -/// Numerically casts a vector, elementwise. -/// -/// `T` and `U` must be vectors of integers or floats, and must have the same length. -/// -/// When casting floats to integers, the result is truncated. Out-of-bounds result lead to UB. -/// When casting integers to floats, the result is rounded. -/// Otherwise, truncates or extends the value, maintaining the sign for signed integers. -/// -/// # Safety -/// Casting from integer types is always safe. -/// Casting between two float types is also always safe.
-/// -/// Casting floats to integers truncates, following the same rules as `to_int_unchecked`. -/// Specifically, each element must: -/// * Not be `NaN` -/// * Not be infinite -/// * Be representable in the return type, after truncating off its fractional part -#[rustc_intrinsic] -#[rustc_nounwind] -pub unsafe fn sve_cast(x: T) -> U; - /// Create a tuple of two vectors. /// /// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a diff --git a/tests/ui/scalable-vectors/cast-intrinsic.rs b/tests/ui/scalable-vectors/cast-intrinsic.rs index f2157d8bcc14b..e5d2efb0b6c85 100644 --- a/tests/ui/scalable-vectors/cast-intrinsic.rs +++ b/tests/ui/scalable-vectors/cast-intrinsic.rs @@ -4,7 +4,7 @@ #![allow(incomplete_features, internal_features, improper_ctypes)] #![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)] -use std::intrinsics::simd::scalable::sve_cast; +use std::intrinsics::simd::simd_cast; #[derive(Copy, Clone)] #[rustc_scalable_vector(16)] @@ -61,5 +61,5 @@ pub unsafe fn svld1sh_gather_s64offset_s64( offsets: svint64_t, ) -> nxv2i16; } - sve_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets)) + simd_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets)) } From 62ffc899143d19af073bcaee552a132822477a6c Mon Sep 17 00:00:00 2001 From: David Wood Date: Fri, 10 Apr 2026 05:50:58 +0000 Subject: [PATCH 2/2] cg_llvm: scalable vectors with `simd_select` Building on the previous change, support scalable vectors with `simd_select`. Previous patches already landed the necessary changes in the implementation of this intrinsic, but didn't allow scalable vector arguments to be passed in. 
--- compiler/rustc_codegen_llvm/src/intrinsic.rs | 4 ++-- tests/ui/scalable-vectors/select-intrinsic.rs | 22 +++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 tests/ui/scalable-vectors/select-intrinsic.rs diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 49c72aee7e00d..3663f66f9c1fd 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1784,7 +1784,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } let supports_scalable = match name { - sym::simd_cast => true, + sym::simd_cast | sym::simd_select => true, _ => false, }; @@ -1984,7 +1984,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if name == sym::simd_select { let m_elem_ty = in_elem; let m_len = in_len; - let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument); + let (v_len, _, _) = require_simd_or_scalable!(args[1].layout.ty, SimdArgument); require!( m_len == v_len, InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len } diff --git a/tests/ui/scalable-vectors/select-intrinsic.rs b/tests/ui/scalable-vectors/select-intrinsic.rs new file mode 100644 index 0000000000000..7ae2683b3dfef --- /dev/null +++ b/tests/ui/scalable-vectors/select-intrinsic.rs @@ -0,0 +1,22 @@ +//@ check-pass +//@ only-aarch64 +#![crate_type = "lib"] +#![allow(incomplete_features, internal_features, improper_ctypes)] +#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)] + +use std::intrinsics::simd::simd_select; + +#[derive(Copy, Clone)] +#[rustc_scalable_vector(16)] +#[allow(non_camel_case_types)] +pub struct svbool_t(bool); + +#[derive(Copy, Clone)] +#[rustc_scalable_vector(16)] +#[allow(non_camel_case_types)] +pub struct svint8_t(i8); + +#[target_feature(enable = "sve")] +pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { + unsafe { simd_select::(pg, op1, op2) } +}