From e2971a00397aad3531d23b866b0d0022c5035557 Mon Sep 17 00:00:00 2001 From: Clinton Ingram Date: Fri, 6 Feb 2026 16:11:27 -0800 Subject: [PATCH] accelerate uint->floating casts on pre-AVX-512 x86 --- src/coreclr/jit/compiler.h | 1 + src/coreclr/jit/decomposelongs.cpp | 19 ++-- src/coreclr/jit/flowgraph.cpp | 38 +++++++ src/coreclr/jit/importer.cpp | 24 +---- src/coreclr/jit/lowerxarch.cpp | 94 ++++++++++++++++- src/coreclr/jit/morph.cpp | 163 ++++++++++------------------- 6 files changed, 197 insertions(+), 142 deletions(-) diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index 88f701d06c9f9f..a508b60ce3665d 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -6236,6 +6236,7 @@ class Compiler void fgConvertBBToThrowBB(BasicBlock* block); bool fgCastNeeded(GenTree* tree, var_types toType); + bool fgCastRequiresHelper(var_types fromType, var_types toType, bool overflow = false); void fgLoopCallTest(BasicBlock* srcBB, BasicBlock* dstBB); void fgLoopCallMark(); diff --git a/src/coreclr/jit/decomposelongs.cpp b/src/coreclr/jit/decomposelongs.cpp index cc2b66af5063d7..97ba9bb6ad53b7 100644 --- a/src/coreclr/jit/decomposelongs.cpp +++ b/src/coreclr/jit/decomposelongs.cpp @@ -138,6 +138,8 @@ GenTree* DecomposeLongs::DecomposeNode(GenTree* tree) } #if defined(FEATURE_HW_INTRINSICS) && defined(TARGET_X86) + // On x86, long->floating casts are implemented in DecomposeCast. + // Those nodes, plus any nodes that produce a long, will be examined. if (!tree->TypeIs(TYP_LONG) && !(tree->OperIs(GT_CAST) && varTypeIsLong(tree->AsCast()->CastOp()) && varTypeIsFloating(tree))) #else @@ -159,6 +161,9 @@ GenTree* DecomposeLongs::DecomposeNode(GenTree* tree) // HWIntrinsics can consume/produce a long directly, provided its source/target is memory. // Here we do a conservative check for specific cases where it is certain the load/store // can be contained. In those cases, we can skip decomposition. + // + // We also look for longs consumed directly by a long->floating cast. These can skip + // decomposition because the cast is implemented using HWIntrinsics. GenTree* user = use.User(); @@ -589,21 +594,17 @@ GenTree* DecomposeLongs::DecomposeCast(LIR::Use& use) // The sequence this creates is simply: // AVX512DQ.VL.ConvertToVector128Single(Vector128.CreateScalarUnsafe(LONG)).ToScalar() - NamedIntrinsic intrinsicId = NI_Illegal; - GenTree* srcOp = cast->CastOp(); - var_types dstType = cast->CastToType(); - var_types baseFloatingType = (dstType == TYP_FLOAT) ? TYP_FLOAT : TYP_DOUBLE; - var_types baseIntegralType = cast->IsUnsigned() ? TYP_ULONG : TYP_LONG; + NamedIntrinsic intrinsicId = NI_Illegal; + GenTree* srcOp = cast->CastOp(); assert(!cast->gtOverflow()); assert(m_compiler->compIsaSupportedDebugOnly(InstructionSet_AVX512)); intrinsicId = (dstType == TYP_FLOAT) ? 
NI_AVX512_ConvertToVector128Single : NI_AVX512_ConvertToVector128Double; - GenTree* createScalar = m_compiler->gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, srcOp, baseIntegralType, 16); - GenTree* convert = - m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, createScalar, intrinsicId, baseIntegralType, 16); - GenTree* toScalar = m_compiler->gtNewSimdToScalarNode(dstType, convert, baseFloatingType, 16); + GenTree* createScalar = m_compiler->gtNewSimdCreateScalarUnsafeNode(TYP_SIMD16, srcOp, srcType, 16); + GenTree* convert = m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, createScalar, intrinsicId, srcType, 16); + GenTree* toScalar = m_compiler->gtNewSimdToScalarNode(dstType, convert, dstType, 16); Range().InsertAfter(cast, createScalar, convert, toScalar); Range().Remove(cast); diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index f6fca0009125cb..b97880dc50be7b 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -1271,6 +1271,44 @@ bool Compiler::fgCastNeeded(GenTree* tree, var_types toType) return true; } +//------------------------------------------------------------------------------------- +// fgCastRequiresHelper: Check whether a given cast must be converted to a helper call. +// +// Arguments: +// fromType - The source type of the cast. +// toType - The target type of the cast. +// overflow - True if the cast has the GTF_OVERFLOW flag set. +// +// Return Value: +// True if the cast requires a helper call, otherwise false. +// +bool Compiler::fgCastRequiresHelper(var_types fromType, var_types toType, bool overflow /* false */) +{ + if (varTypeIsFloating(fromType) && overflow) + { + assert(varTypeIsIntegral(toType)); + return true; + } + +#if !defined(TARGET_64BIT) + if (varTypeIsFloating(fromType) && varTypeIsLong(toType)) + { + return true; + } + + if (varTypeIsLong(fromType) && varTypeIsFloating(toType)) + { +#if defined(TARGET_X86) + return !compOpportunisticallyDependsOn(InstructionSet_AVX512); +#endif // TARGET_X86 + + return true; + } +#endif // !TARGET_64BIT + + return false; +} + GenTree* Compiler::fgGetCritSectOfStaticMethod() { noway_assert(!compIsForInlining()); diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 2fa1023a1587d0..38cacea5ece469 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -8167,28 +8167,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) goto _CONV; _CONV: - // only converts from FLOAT or DOUBLE to an integer type - // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls - - if (varTypeIsFloating(lclTyp)) - { - callNode = varTypeIsLong(impStackTop().val) || - uns // uint->dbl gets turned into uint->long->dbl -#ifdef TARGET_64BIT - // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK? - // TYP_BYREF could be used as TYP_I_IMPL which is long. - // TODO-CQ: remove this when we lower casts long/ulong --> float/double - // and generate SSE2 code instead of going through helper calls. 
- || impStackTop().val->TypeIs(TYP_BYREF) -#endif - ; - } - else - { - callNode = varTypeIsFloating(impStackTop().val->TypeGet()); - } - - op1 = impPopStack().val; + op1 = impPopStack().val; + callNode = fgCastRequiresHelper(op1->TypeGet(), lclTyp, ovfl); impBashVarAddrsToI(op1); diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp index eb3edc5933402f..2e9ba8b9e7aaae 100644 --- a/src/coreclr/jit/lowerxarch.cpp +++ b/src/coreclr/jit/lowerxarch.cpp @@ -827,7 +827,7 @@ void Lowering::LowerCast(GenTree* tree) GenTree* castOp = tree->AsCast()->CastOp(); var_types dstType = tree->CastToType(); - var_types srcType = castOp->TypeGet(); + var_types srcType = genActualType(castOp); // force the srcType to unsigned if GT_UNSIGNED flag is set if (tree->IsUnsigned()) @@ -844,12 +844,96 @@ void Lowering::LowerCast(GenTree* tree) // Long types should have been handled by helper call or in DecomposeLongs on x86. assert(!varTypeIsLong(dstType) || TargetArchitecture::Is64Bit); } - else if (srcType == TYP_UINT) + +#ifdef TARGET_X86 + if ((srcType == TYP_UINT) && varTypeIsFloating(dstType) && + !m_compiler->compOpportunisticallyDependsOn(InstructionSet_AVX512)) { - // uint->float casts should have an intermediate cast to long unless - // we have the EVEX unsigned conversion instructions available. - assert(dstType != TYP_FLOAT || m_compiler->canUseEvexEncodingDebugOnly()); + // Pre-AVX-512, there was no conversion instruction for uint->floating, so we emulate it + // using signed int conversion. This is necessary only on 32-bit, because x64 simply casts + // the uint up to a signed long before conversion. + // + // This logic depends on the fact that conversion from int to double is lossless. When + // converting to float, we use a double intermediate, and convert to float only after the + // double result is fixed up. This ensures the floating result is rounded correctly. + + LABELEDDISPTREERANGE("LowerCast before", BlockRange(), tree); + + LIR::Range castRange = LIR::EmptyRange(); + + // This creates the equivalent of the following C# code: + // var castResult = Sse2.ConvertScalarToVector128Double(Vector128.Zero, (int)castOp); + + GenTree* zero = m_compiler->gtNewZeroConNode(TYP_SIMD16); + GenTree* castResult = + m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, zero, castOp, NI_X86Base_ConvertScalarToVector128Double, + TYP_INT, 16); + + castRange.InsertAtEnd(zero); + castRange.InsertAtEnd(castResult); + + // We will use the conversion result multiple times, so replace it with a lclVar. + LIR::Use resUse; + LIR::Use::MakeDummyUse(castRange, castResult, &resUse); + resUse.ReplaceWithLclVar(m_compiler); + castResult = resUse.Def(); + + // If the input had the MSB set, it will have converted as a negative, so we must wrap the + // result back around to positive by adding 2^32. `blendvpd` uses only the MSB of the mask + // element. 
+ // + // This creates the equivalent of the following C# code: + // var addRes = Sse2.AddScalar(castResult, Vector128.CreateScalar(4294967296.0)); + // castResult = Sse41.BlendVariable(castResult, addRes, castResult); + + GenTreeVecCon* addCns = m_compiler->gtNewVconNode(TYP_SIMD16); + addCns->gtSimdVal.f64[0] = 4294967296.0; + + GenTree* addRes = + m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, castResult, addCns, NI_X86Base_AddScalar, TYP_DOUBLE, 16); + + castRange.InsertAtEnd(addCns); + castRange.InsertAtEnd(addRes); + + GenTree* resClone1 = m_compiler->gtClone(castResult); + GenTree* resClone2 = m_compiler->gtClone(castResult); + castResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, resClone1, addRes, resClone2, + NI_X86Base_BlendVariable, TYP_DOUBLE, 16); + castRange.InsertAtEnd(resClone1); + castRange.InsertAtEnd(resClone2); + castRange.InsertAtEnd(castResult); + + // Convert to float if necessary, then ToScalar() the result out. + if (dstType == TYP_FLOAT) + { + castResult = m_compiler->gtNewSimdHWIntrinsicNode(TYP_SIMD16, castResult, + NI_X86Base_ConvertToVector128Single, TYP_DOUBLE, 16); + castRange.InsertAtEnd(castResult); + } + + GenTree* toScalar = m_compiler->gtNewSimdToScalarNode(dstType, castResult, dstType, 16); + castRange.InsertAtEnd(toScalar); + + LIR::ReadOnlyRange lowerRange(castRange.FirstNode(), castRange.LastNode()); + BlockRange().InsertBefore(tree, std::move(castRange)); + + LABELEDDISPTREERANGE("LowerCast after", BlockRange(), toScalar); + + LIR::Use castUse; + if (BlockRange().TryGetUse(tree, &castUse)) + { + castUse.ReplaceWith(toScalar); + } + else + { + toScalar->SetUnusedValue(); + } + + BlockRange().Remove(tree); + LowerRange(lowerRange); + return; } +#endif // TARGET_X86 if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index c0ce59215c821b..7f9748b503b11e 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -264,8 +264,7 @@ GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, bool morphAr // casts for all targets. // 2. Morphs casts not supported by the target directly into helpers. // These mostly have to do with casts from and to floating point -// types, especially checked ones. Refer to the implementation for -// what specific casts need to be handled - it is a complex matrix. +// types, especially checked ones. // 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via // storing the GC tree to an inline non-GC temporary. // 3. "Pushes down" truncating long -> int casts for some operations: @@ -288,27 +287,11 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) GenTree* oper = tree->CastOp(); var_types srcType = genActualType(oper); var_types dstType = tree->CastToType(); - unsigned dstSize = genTypeSize(dstType); - // See if the cast has to be done in two steps. R -> I if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType)) { - if (srcType == TYP_FLOAT -#ifdef TARGET_64BIT - // 64-bit: src = float, dst is overflow conversion. - // This goes through helper and hence src needs to be converted to double. - && tree->gtOverflow() -#else - // 32-bit: src = float, dst = int64/uint64 or overflow conversion. - && (tree->gtOverflow() || varTypeIsLong(dstType)) -#endif // TARGET_64BIT - ) - { - oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); - } - // Do we need to do it in two steps R -> I -> smallType? 
- if (dstSize < genTypeSize(TYP_INT)) + if (varTypeIsSmall(dstType)) { oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT); oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); @@ -318,45 +301,51 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) // CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT). assert(!tree->IsUnsigned()); } - else + else if (fgCastRequiresHelper(srcType, dstType, tree->gtOverflow())) { - if (!tree->gtOverflow()) + CorInfoHelpFunc helper = CORINFO_HELP_UNDEF; + + if (srcType == TYP_FLOAT) { -#if defined(TARGET_64BIT) || defined(TARGET_WASM) - return nullptr; -#else - if (!varTypeIsLong(dstType)) - { - return nullptr; - } + oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE); + } + if (tree->gtOverflow()) + { switch (dstType) { + case TYP_INT: + helper = CORINFO_HELP_DBL2INT_OVF; + break; + case TYP_UINT: + helper = CORINFO_HELP_DBL2UINT_OVF; + break; case TYP_LONG: - return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper); + helper = CORINFO_HELP_DBL2LNG_OVF; + break; case TYP_ULONG: - return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper); + helper = CORINFO_HELP_DBL2ULNG_OVF; + break; default: unreached(); } -#endif // TARGET_64BIT || TARGET_WASM } else { switch (dstType) { - case TYP_INT: - return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper); - case TYP_UINT: - return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper); case TYP_LONG: - return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper); + helper = CORINFO_HELP_DBL2LNG; + break; case TYP_ULONG: - return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper); + helper = CORINFO_HELP_DBL2ULNG; + break; default: unreached(); } } + + return fgMorphCastIntoHelper(tree, helper, oper); } } @@ -376,90 +365,52 @@ GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree) } #ifndef TARGET_64BIT - // The code generation phase (for x86 & ARM32) does not handle casts - // directly from [u]long to anything other than [u]int. Insert an - // intermediate cast to native int. - else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType)) + else if (varTypeIsLong(srcType)) { - oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); - oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); - tree->ClearUnsigned(); - tree->AsCast()->CastOp() = oper; - } -#endif //! TARGET_64BIT - -#ifdef TARGET_ARM - // converts long/ulong --> float/double casts into helper calls. - else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType)) - { - CorInfoHelpFunc helper = CORINFO_HELP_UNDEF; - if (dstType == TYP_FLOAT) + // The code generation phase (for x86 & ARM32) does not handle casts + // directly from [u]long to anything other than [u]int. Insert an + // intermediate cast to native int. + if (varTypeIsSmall(dstType)) { - helper = tree->IsUnsigned() ? CORINFO_HELP_ULNG2FLT : CORINFO_HELP_LNG2FLT; + oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL); + oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); + tree->ClearUnsigned(); + tree->AsCast()->CastOp() = oper; } - else + // Convert long/ulong --> float/double casts into helper calls if necessary. + else if (fgCastRequiresHelper(srcType, dstType)) { - helper = tree->IsUnsigned() ? CORINFO_HELP_ULNG2DBL : CORINFO_HELP_LNG2DBL; + CorInfoHelpFunc helper = CORINFO_HELP_UNDEF; + + if (dstType == TYP_FLOAT) + { + helper = tree->IsUnsigned() ? 
CORINFO_HELP_ULNG2FLT : CORINFO_HELP_LNG2FLT; + } + else + { + assert(dstType == TYP_DOUBLE); + helper = tree->IsUnsigned() ? CORINFO_HELP_ULNG2DBL : CORINFO_HELP_LNG2DBL; + } + + return fgMorphCastIntoHelper(tree, helper, oper); } - return fgMorphCastIntoHelper(tree, helper, oper); } -#endif // TARGET_ARM +#endif // !TARGET_64BIT #ifdef TARGET_AMD64 // Do we have to do two step U4 -> R4/8 ? // If we don't have the EVEX unsigned conversion instructions available, // we will widen to long and use signed conversion: U4 -> Long -> R4/8. - // U8 -> R4/R8 is handled directly in codegen, so we ignore it here. - else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) + else if (tree->IsUnsigned() && varTypeIsInt(srcType) && varTypeIsFloating(dstType) && + !compOpportunisticallyDependsOn(InstructionSet_AVX512)) { - srcType = varTypeToUnsigned(srcType); - - if (srcType == TYP_UINT && !canUseEvexEncoding()) - { - oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); - oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); - tree->ClearUnsigned(); - tree->CastOp() = oper; - } + oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); + oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); + tree->ClearUnsigned(); + tree->CastOp() = oper; } #endif // TARGET_AMD64 -#ifdef TARGET_X86 -#ifdef FEATURE_HW_INTRINSICS - else if (varTypeIsLong(srcType) && varTypeIsFloating(dstType) && canUseEvexEncoding()) - { - // We can handle these casts directly using SIMD instructions. - // The transform to SIMD is done in DecomposeLongs. - return nullptr; - } -#endif // FEATURE_HW_INTRINSICS - - // Do we have to do two step U4/8 -> R4/8 ? - else if (tree->IsUnsigned() && varTypeIsFloating(dstType)) - { - srcType = varTypeToUnsigned(srcType); - - if (srcType == TYP_ULONG) - { - CorInfoHelpFunc helper = (dstType == TYP_FLOAT) ? CORINFO_HELP_ULNG2FLT : CORINFO_HELP_ULNG2DBL; - return fgMorphCastIntoHelper(tree, helper, oper); - } - else if (srcType == TYP_UINT && !canUseEvexEncoding()) - { - oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG); - oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT)); - tree->ClearUnsigned(); - - CorInfoHelpFunc helper = (dstType == TYP_FLOAT) ? CORINFO_HELP_LNG2FLT : CORINFO_HELP_LNG2DBL; - return fgMorphCastIntoHelper(tree, helper, oper); - } - } - else if (!tree->IsUnsigned() && (srcType == TYP_LONG) && varTypeIsFloating(dstType)) - { - CorInfoHelpFunc helper = (dstType == TYP_FLOAT) ? CORINFO_HELP_LNG2FLT : CORINFO_HELP_LNG2DBL; - return fgMorphCastIntoHelper(tree, helper, oper); - } -#endif // TARGET_X86 else if (varTypeIsGC(srcType) != varTypeIsGC(dstType)) { // We are casting away GC information. we would like to just
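
A minimal C# sketch of the uint->floating emulation that the LowerCast change above produces, assuming nothing beyond the semantics described in its comments (the method names are illustrative, not JIT code):

    // Scalar view of the lowered sequence: convert as a signed int, then fix up wrapped values.
    static double UIntToDouble(uint value)
    {
        // cvtsi2sd converts the signed bit pattern, so inputs with the MSB set come out negative.
        double d = (int)value;

        // Add 2^32 to wrap negative results back into [2^31, 2^32). int->double is exact, and
        // every value in that range is exactly representable as a double, so no precision is
        // lost. In the lowered IR this select is the blendvpd keyed off the sign bit of the
        // first conversion.
        if (d < 0.0)
        {
            d += 4294967296.0;
        }

        return d;
    }

    static float UIntToSingle(uint value)
    {
        // Round to float only once, from the exact double, so the result matches a direct
        // uint->float conversion.
        return (float)UIntToDouble(value);
    }

The same steps written with the intrinsics named in the comments, assembled here into one method as a sketch (this is not the generated code itself):

    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    static double UIntToDoubleSimd(uint value)
    {
        // cvtsi2sd of the signed bit pattern into the low double lane.
        Vector128<double> res = Sse2.ConvertScalarToVector128Double(Vector128<double>.Zero, (int)value);

        // Candidate fixed-up value: low lane plus 2^32.
        Vector128<double> add = Sse2.AddScalar(res, Vector128.CreateScalar(4294967296.0));

        // blendvpd selects 'add' where the sign bit of 'res' is set, otherwise keeps 'res'.
        res = Sse41.BlendVariable(res, add, res);

        return res.ToScalar();
    }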
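
For comparison, the x86 long->floating path handled in DecomposeCast corresponds to the one-line sequence already quoted in its comment; as a sketch, assuming AVX-512 DQ+VL hardware and the public Avx512DQ.VL API that comment refers to:

    static float Int64ToSingle(long value)
    {
        // vcvtqq2ps on the low element, then extract the scalar float.
        Vector128<long> v = Vector128.CreateScalarUnsafe(value);
        return Avx512DQ.VL.ConvertToVector128Single(v).ToScalar();
    }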