Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions compiler/rustc_abi/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,8 @@ use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
#[cfg(feature = "nightly")]
use rustc_error_messages::{DiagArgValue, IntoDiagArg};
#[cfg(feature = "nightly")]
use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level, msg};
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
Expand Down Expand Up @@ -1775,6 +1777,24 @@ impl NumScalableVectors {
}
}

#[cfg(feature = "nightly")]
impl IntoDiagArg for NumScalableVectors {
    /// Renders the scalable-vector count as an English word ("one" .. "eight")
    /// for interpolation into diagnostic messages.
    ///
    /// # Panics
    /// Panics when the count is `0` or greater than `8`: such values are
    /// ill-formed for `NumScalableVectors` and indicate a compiler bug.
    fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue {
        // Table lookup instead of a nine-arm match; the count is 1-based.
        const WORDS: [&str; 8] = ["one", "two", "three", "four", "five", "six", "seven", "eight"];
        let word = match self.0 {
            0 => panic!("`NumScalableVectors(0)` is ill-formed"),
            n @ 1..=8 => WORDS[(n - 1) as usize],
            _ => panic!("`NumScalableVectors(N)` for N>8 is ill-formed"),
        };
        DiagArgValue::Str(std::borrow::Cow::Borrowed(word))
    }
}

/// The way we represent values to the backend
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
Expand Down
236 changes: 125 additions & 111 deletions compiler/rustc_codegen_llvm/src/intrinsic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -606,27 +606,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
self.pointercast(val, self.type_ptr())
}

sym::sve_cast => {
let Some((in_cnt, in_elem, in_num_vecs)) =
args[0].layout.ty.scalable_vector_parts(self.cx.tcx)
else {
bug!("input parameter to `sve_cast` was not scalable vector");
};
let out_layout = self.layout_of(fn_args.type_at(1));
let Some((out_cnt, out_elem, out_num_vecs)) =
out_layout.ty.scalable_vector_parts(self.cx.tcx)
else {
bug!("output parameter to `sve_cast` was not scalable vector");
};
assert_eq!(in_cnt, out_cnt);
assert_eq!(in_num_vecs, out_num_vecs);
let out_llty = self.backend_type(out_layout);
match simd_cast(self, sym::simd_cast, args, out_llty, in_elem, out_elem) {
Some(val) => val,
_ => bug!("could not cast scalable vectors"),
}
}

sym::sve_tuple_create2 => {
assert_matches!(
self.layout_of(fn_args.type_at(0)).backend_repr,
Expand Down Expand Up @@ -1668,6 +1647,23 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}};
}

// Requires that `$ty` is either a fixed-length SIMD vector or a scalable
// vector, emitting `InvalidMonomorphization::$variant` otherwise (via the
// surrounding `require!` macro). Expands to a `(len, elem_ty, num_vecs)`
// triple where `num_vecs` is `None` for fixed-length vectors and `Some(..)`
// for scalable vectors.
// NOTE(review): relies on `require!`, `bx`, `span`, and `name` being in
// scope at every expansion site — only usable inside this function.
macro_rules! require_simd_or_scalable {
($ty: expr, $variant:ident) => {{
require!(
$ty.is_simd() || $ty.is_scalable_vector(),
InvalidMonomorphization::$variant { span, name, ty: $ty }
);
if $ty.is_simd() {
// Fixed-length vector: length and element type only.
let (len, ty) = $ty.simd_size_and_type(bx.tcx());
(len, ty, None)
} else {
// Scalable vector: `expect` is safe because `is_scalable_vector`
// was checked by the `require!` above.
let (count, ty, num_vecs) =
$ty.scalable_vector_parts(bx.tcx()).expect("`is_scalable_vector` was wrong");
(count as u64, ty, Some(num_vecs))
}
}};
}

/// Returns the bitwidth of the `$ty` argument if it is an `Int` or `Uint` type.
macro_rules! require_int_or_uint_ty {
($ty: expr, $diag: expr) => {
Expand Down Expand Up @@ -1787,8 +1783,19 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
return Ok(splat);
}

// every intrinsic below takes a SIMD vector as its first argument
let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
let supports_scalable = match name {
sym::simd_cast | sym::simd_select => true,
_ => false,
};

// Every intrinsic below takes a SIMD vector as its first argument. Some intrinsics also accept
// scalable vectors. `require_simd_or_scalable` is used regardless as it'll do the right thing
// for non-scalable vectors, and an additional check to prohibit scalable vectors for those
// intrinsics that do not support them is added.
if !supports_scalable {
let _ = require_simd!(args[0].layout.ty, SimdInput);
}
let (in_len, in_elem, in_num_vecs) = require_simd_or_scalable!(args[0].layout.ty, SimdInput);
let in_ty = args[0].layout.ty;

let comparison = match name {
Expand Down Expand Up @@ -1977,7 +1984,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
let (v_len, _, _) = require_simd_or_scalable!(args[1].layout.ty, SimdArgument);
require!(
m_len == v_len,
InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
Expand Down Expand Up @@ -2781,7 +2788,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
}

if name == sym::simd_cast || name == sym::simd_as {
let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
let (out_len, out_elem, out_num_vecs) = require_simd_or_scalable!(ret_ty, SimdReturn);
require!(
in_len == out_len,
InvalidMonomorphization::ReturnLengthInputType {
Expand All @@ -2793,9 +2800,99 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
out_len
}
);
match simd_cast(bx, name, args, llret_ty, in_elem, out_elem) {
Some(val) => return Ok(val),
None => return_error!(InvalidMonomorphization::UnsupportedCast {
require!(
in_num_vecs == out_num_vecs,
InvalidMonomorphization::ReturnNumVecsInputType {
span,
name,
in_num_vecs: in_num_vecs.unwrap_or(NumScalableVectors(1)),
in_ty,
ret_ty,
out_num_vecs: out_num_vecs.unwrap_or(NumScalableVectors(1))
}
);

// Casting cares about nominal type, not just structural type
if in_elem == out_elem {
return Ok(args[0].immediate());
}

#[derive(Copy, Clone)]
enum Sign {
Unsigned,
Signed,
}
use Sign::*;

enum Style {
Float,
Int(Sign),
Unsupported,
}

let (in_style, in_width) = match in_elem.kind() {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::Int(i) => (
Style::Int(Signed),
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Uint(u) => (
Style::Int(Unsigned),
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
let (out_style, out_width) = match out_elem.kind() {
ty::Int(i) => (
Style::Int(Signed),
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Uint(u) => (
Style::Int(Unsigned),
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};

match (in_style, out_style) {
(Style::Int(sign), Style::Int(_)) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => match sign {
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
},
});
}
(Style::Int(Sign::Signed), Style::Float) => {
return Ok(bx.sitofp(args[0].immediate(), llret_ty));
}
(Style::Int(Sign::Unsigned), Style::Float) => {
return Ok(bx.uitofp(args[0].immediate(), llret_ty));
}
(Style::Float, Style::Int(sign)) => {
return Ok(match (sign, name == sym::simd_as) {
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
(_, true) => bx.cast_float_to_int(
matches!(sign, Sign::Signed),
args[0].immediate(),
llret_ty,
),
});
}
(Style::Float, Style::Float) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
});
}
_ => return_error!(InvalidMonomorphization::UnsupportedCast {
span,
name,
in_ty,
Expand Down Expand Up @@ -2977,86 +3074,3 @@ fn generic_simd_intrinsic<'ll, 'tcx>(

span_bug!(span, "unknown SIMD intrinsic");
}

/// Implementation of `core::intrinsics::simd_cast`, re-used by `core::scalable::sve_cast`.
///
/// Lowers an elementwise numeric cast between two vector types to the
/// appropriate LLVM cast instruction, chosen from the (input, output) element
/// kinds and bit widths. Returns `None` when either element type is not an
/// integer or float; the caller is responsible for reporting that as an
/// unsupported cast.
fn simd_cast<'ll, 'tcx>(
bx: &mut Builder<'_, 'll, 'tcx>,
name: Symbol,
args: &[OperandRef<'tcx, &'ll Value>],
llret_ty: &'ll Type,
in_elem: Ty<'tcx>,
out_elem: Ty<'tcx>,
) -> Option<&'ll Value> {
// Casting cares about nominal type, not just structural type
if in_elem == out_elem {
return Some(args[0].immediate());
}

// Signedness of an integer element, used to pick sext vs. zext and
// sitofp vs. uitofp.
#[derive(Copy, Clone)]
enum Sign {
Unsigned,
Signed,
}
use Sign::*;

// Coarse classification of an element type for cast selection.
enum Style {
Float,
Int(Sign),
Unsupported,
}

let (in_style, in_width) = match in_elem.kind() {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::Int(i) => (
Style::Int(Signed),
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Uint(u) => (
Style::Int(Unsigned),
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
let (out_style, out_width) = match out_elem.kind() {
ty::Int(i) => (
Style::Int(Signed),
i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Uint(u) => (
Style::Int(Unsigned),
u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};

match (in_style, out_style) {
// int -> int: truncate when narrowing; when widening, the *input*
// signedness decides sign- vs. zero-extension.
(Style::Int(sign), Style::Int(_)) => Some(match in_width.cmp(&out_width) {
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => match sign {
Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
},
}),
// int -> float: signedness of the input picks the conversion.
(Style::Int(Sign::Signed), Style::Float) => Some(bx.sitofp(args[0].immediate(), llret_ty)),
(Style::Int(Sign::Unsigned), Style::Float) => {
Some(bx.uitofp(args[0].immediate(), llret_ty))
}
// float -> int: plain fpto[us]i for `simd_cast` (UB on out-of-range
// inputs); `simd_as` instead uses the saturating helper.
(Style::Float, Style::Int(sign)) => Some(match (sign, name == sym::simd_as) {
(Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
(Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
(_, true) => {
bx.cast_float_to_int(matches!(sign, Sign::Signed), args[0].immediate(), llret_ty)
}
}),
// float -> float: truncate or extend based on bit width.
(Style::Float, Style::Float) => Some(match in_width.cmp(&out_width) {
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
}),
// Anything else (e.g. bool or pointer elements): unsupported here.
_ => None,
}
}
12 changes: 12 additions & 0 deletions compiler/rustc_codegen_ssa/src/errors.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use std::io::Error;
use std::path::{Path, PathBuf};
use std::process::ExitStatus;

use rustc_abi::NumScalableVectors;
use rustc_errors::codes::*;
use rustc_errors::{
Diag, DiagArgValue, DiagCtxtHandle, DiagSymbolList, Diagnostic, EmissionGuarantee, IntoDiagArg,
Expand Down Expand Up @@ -809,6 +810,17 @@ pub enum InvalidMonomorphization<'tcx> {
out_len: u64,
},

#[diag("invalid monomorphization of `{$name}` intrinsic: expected return type with {$in_num_vecs} vectors (same as input type `{$in_ty}`), found `{$ret_ty}` with length {$out_num_vecs}", code = E0511)]
ReturnNumVecsInputType {
#[primary_span]
span: Span,
name: Symbol,
in_num_vecs: NumScalableVectors,
in_ty: Ty<'tcx>,
ret_ty: Ty<'tcx>,
out_num_vecs: NumScalableVectors,
},

#[diag("invalid monomorphization of `{$name}` intrinsic: expected second argument with length {$in_len} (same as input type `{$in_ty}`), found `{$arg_ty}` with length {$out_len}", code = E0511)]
SecondArgumentLength {
#[primary_span]
Expand Down
21 changes: 0 additions & 21 deletions library/core/src/intrinsics/simd/scalable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,27 +2,6 @@
//!
//! In this module, a "vector" is any `#[rustc_scalable_vector]`-annotated type.

/// Numerically casts a vector, elementwise.
///
/// `T` and `U` must be vectors of integers or floats, and must have the same length.
///
/// When casting floats to integers, the result is truncated. Out-of-bounds results lead to UB.
/// When casting integers to floats, the result is rounded.
/// Otherwise, truncates or extends the value, maintaining the sign for signed integers.
///
/// # Safety
/// Casting from integer types is always safe.
/// Casting between two float types is also always safe.
///
/// Casting floats to integers truncates, following the same rules as `to_int_unchecked`.
/// Specifically, each element must:
/// * Not be `NaN`
/// * Not be infinite
/// * Be representable in the return type, after truncating off its fractional part
#[rustc_intrinsic]
#[rustc_nounwind]
pub unsafe fn sve_cast<T, U>(x: T) -> U;

/// Create a tuple of two vectors.
///
/// `SVecTup` must be a scalable vector tuple (`#[rustc_scalable_vector]`) and `SVec` must be a
Expand Down
4 changes: 2 additions & 2 deletions tests/ui/scalable-vectors/cast-intrinsic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
#![allow(incomplete_features, internal_features, improper_ctypes)]
#![feature(abi_unadjusted, core_intrinsics, link_llvm_intrinsics, rustc_attrs)]

use std::intrinsics::simd::scalable::sve_cast;
use std::intrinsics::simd::simd_cast;

#[derive(Copy, Clone)]
#[rustc_scalable_vector(16)]
Expand Down Expand Up @@ -61,5 +61,5 @@ pub unsafe fn svld1sh_gather_s64offset_s64(
offsets: svint64_t,
) -> nxv2i16;
}
sve_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
simd_cast(_svld1sh_gather_s64offset_s64(pg.sve_into(), base, offsets))
}
Loading
Loading