From 2d7ce3cc5afd180b8f3bc06885ea7c20cbb9eb04 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Tue, 17 Feb 2026 09:16:43 -0500 Subject: [PATCH 1/8] Add a disambiguating conversion --- Project.toml | 4 +- src/tensors/abstractblocktensor/conversion.jl | 41 ++++++++++++------- src/tensors/tensoroperations.jl | 7 ++++ test/abstracttensor/blocktensor.jl | 9 ++++ 4 files changed, 46 insertions(+), 15 deletions(-) diff --git a/Project.toml b/Project.toml index 4c999ec..10b9d80 100644 --- a/Project.toml +++ b/Project.toml @@ -27,6 +27,7 @@ Aqua = "0.8" BlockArrays = "1" Combinatorics = "1" Compat = "4.13" +JLArrays = "0.3" LinearAlgebra = "1" MatrixAlgebraKit = "0.6" Random = "1" @@ -44,10 +45,11 @@ julia = "1.10" Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" +JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" TestExtras = "5ed8adda-3752-4e41-b88a-e8b09835ee3a" [targets] -test = ["Test", "TestExtras", "Random", "Combinatorics", "SafeTestsets", "Aqua", "Adapt"] +test = ["Test", "TestExtras", "Random", "Combinatorics", "SafeTestsets", "Aqua", "Adapt", "JLArrays"] diff --git a/src/tensors/abstractblocktensor/conversion.jl b/src/tensors/abstractblocktensor/conversion.jl index 1108cda..a356c6d 100644 --- a/src/tensors/abstractblocktensor/conversion.jl +++ b/src/tensors/abstractblocktensor/conversion.jl @@ -1,37 +1,50 @@ # Conversion # ---------- -function Base.convert(::Type{TensorMap}, t::AbstractBlockTensorMap) - S = spacetype(t) - N₁, N₂ = numout(t), numin(t) - cod = ProductSpace{S, N₁}(oplus.(codomain(t).spaces)) - dom = ProductSpace{S, N₂}(oplus.(domain(t).spaces)) - tdst = similar(t, cod ← dom) - - issparse(t) && zerovector!(tdst) +function _copy_subblocks!(tdst, tsrc) + S = spacetype(tsrc) + N₁, N₂ = numout(tsrc), 
numin(tsrc) for ((f₁, f₂), arr) in subblocks(tdst) blockax = ntuple(N₁ + N₂) do i return if i <= N₁ - blockedrange(map(Base.Fix2(dim, f₁.uncoupled[i]), space(t, i))) + blockedrange(map(Base.Fix2(dim, f₁.uncoupled[i]), space(tsrc, i))) else - blockedrange(map(Base.Fix2(dim, f₂.uncoupled[i - N₁]), space(t, i)')) + blockedrange(map(Base.Fix2(dim, f₂.uncoupled[i - N₁]), space(tsrc, i)')) end end - for (k, v) in nonzero_pairs(t) + for (k, v) in nonzero_pairs(tsrc) indices = getindex.(blockax, Block.(Tuple(k))) arr_slice = arr[indices...] # need to check for empty since fusion tree pair might not be present isempty(arr_slice) || copy!(arr_slice, v[f₁, f₂]) end end + return tdst +end +function Base.convert(::Type{TensorMap}, t::AbstractBlockTensorMap) + S = spacetype(t) + N₁, N₂ = numout(t), numin(t) + cod = ProductSpace{S, N₁}(oplus.(codomain(t).spaces)) + dom = ProductSpace{S, N₂}(oplus.(domain(t).spaces)) + tdst = TensorKit.TensorMapWithStorage{scalartype(t), storagetype(t)}(undef, cod, dom) + + issparse(t) && zerovector!(tdst) + _copy_subblocks!(tdst, t) return tdst end -function Base.convert(::Type{T}, t::AbstractBlockTensorMap) where {T <: TensorMap} - tdst = convert(TensorMap, t) - return convert(T, tdst) +function Base.convert(::Type{TT}, t::AbstractBlockTensorMap) where {TT <: TensorKit.TensorMap} + S = spacetype(t) + N₁, N₂ = numout(t), numin(t) + cod = ProductSpace{S, N₁}(oplus.(codomain(t).spaces)) + dom = ProductSpace{S, N₂}(oplus.(domain(t).spaces)) + tdst = TT(undef, cod ← dom) + issparse(t) && zerovector!(tdst) + + _copy_subblocks!(tdst, t) + return tdst end function Base.convert(::Type{TT}, t::AbstractTensorMap) where {TT <: AbstractBlockTensorMap} diff --git a/src/tensors/tensoroperations.jl b/src/tensors/tensoroperations.jl index 73c0c18..1859af2 100644 --- a/src/tensors/tensoroperations.jl +++ b/src/tensors/tensoroperations.jl @@ -15,6 +15,13 @@ function TO.tensoradd_type(TC, A::AdjointBlockTensorMap, pA::Index2Tuple, conjA: return 
TO.tensoradd_type(TC, A', adjointtensorindices(A, pA), !conjA) end +# copy blocks back to CPU/collect them into an array +# seems necessary for GPU-backed BlockTensorMaps but +# maybe not the most efficient approach? +function TO.tensorscalar(t::AbstractBlockTensorMap{T, S, 0, 0}) where {T, S} + return prod(TO.tensorscalar, nonzero_values(t)) +end + # tensoralloc_contract # -------------------- for TTA in (:AbstractTensorMap, :AbstractBlockTensorMap), TTB in (:AbstractTensorMap, :AbstractBlockTensorMap) diff --git a/test/abstracttensor/blocktensor.jl b/test/abstracttensor/blocktensor.jl index 82ebe6e..8b518d4 100644 --- a/test/abstracttensor/blocktensor.jl +++ b/test/abstracttensor/blocktensor.jl @@ -5,6 +5,7 @@ using BlockTensorKit using Random using Combinatorics using Adapt +using JLArrays Vtr = ( SumSpace(ℂ^3), @@ -82,6 +83,14 @@ end t2″ = @inferred BlockTensorMap(t2′, W) @test t1 ≈ t1″ @test t2 ≈ t2″ + # test conversion to TensorMap that isn't backed by a Vector + jl_bt1 = rand(JLVector{T}, W) + TT = TensorKit.TensorMap{T, spacetype(t1′), numout(t1′), numin(t1′), JLVector{T}} + JLArrays.@allowscalar begin # to avoid scalar indexing error in Strided + jl_bt1′ = @constinferred convert(TT, jl_bt1) + jl_bt1″ = @inferred BlockTensorMap(jl_bt1′, W) + end + @test jl_bt1 ≈ jl_bt1″ end end From b82f333a7fdec8bb98d31250b98adea839633cb6 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Sat, 28 Feb 2026 12:39:48 -0500 Subject: [PATCH 2/8] Add comment --- test/abstracttensor/blocktensor.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/abstracttensor/blocktensor.jl b/test/abstracttensor/blocktensor.jl index 8b518d4..2807fa5 100644 --- a/test/abstracttensor/blocktensor.jl +++ b/test/abstracttensor/blocktensor.jl @@ -86,6 +86,8 @@ end # test conversion to TensorMap that isn't backed by a Vector jl_bt1 = rand(JLVector{T}, W) TT = TensorKit.TensorMap{T, spacetype(t1′), numout(t1′), numin(t1′), JLVector{T}} + # The @allowscalar here can be removed once 
QuantumKitHub/Strided.jl#44 and + # QuantumKitHub/StridedViews.jl#31 are merged and new versions are tagged JLArrays.@allowscalar begin # to avoid scalar indexing error in Strided jl_bt1′ = @constinferred convert(TT, jl_bt1) jl_bt1″ = @inferred BlockTensorMap(jl_bt1′, W) From 32e209230d597c32676cc0ff8af5c07e73f6017b Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Mon, 2 Mar 2026 05:26:57 -0500 Subject: [PATCH 3/8] Incremental progress on getting rid of allowscalar --- Project.toml | 3 +++ test/abstracttensor/blocktensor.jl | 10 ++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Project.toml b/Project.toml index 10b9d80..bc920f0 100644 --- a/Project.toml +++ b/Project.toml @@ -53,3 +53,6 @@ TestExtras = "5ed8adda-3752-4e41-b88a-e8b09835ee3a" [targets] test = ["Test", "TestExtras", "Random", "Combinatorics", "SafeTestsets", "Aqua", "Adapt", "JLArrays"] + +[sources] +Strided = {url = "https://github.com/QuantumKitHub/Strided.jl", rev = "ksh/jlarrays"} diff --git a/test/abstracttensor/blocktensor.jl b/test/abstracttensor/blocktensor.jl index 2807fa5..280588a 100644 --- a/test/abstracttensor/blocktensor.jl +++ b/test/abstracttensor/blocktensor.jl @@ -5,7 +5,7 @@ using BlockTensorKit using Random using Combinatorics using Adapt -using JLArrays +using Strided, JLArrays Vtr = ( SumSpace(ℂ^3), @@ -86,12 +86,10 @@ end # test conversion to TensorMap that isn't backed by a Vector jl_bt1 = rand(JLVector{T}, W) TT = TensorKit.TensorMap{T, spacetype(t1′), numout(t1′), numin(t1′), JLVector{T}} - # The @allowscalar here can be removed once QuantumKitHub/Strided.jl#44 and - # QuantumKitHub/StridedViews.jl#31 are merged and new versions are tagged - JLArrays.@allowscalar begin # to avoid scalar indexing error in Strided - jl_bt1′ = @constinferred convert(TT, jl_bt1) + jl_bt1′ = @constinferred convert(TT, jl_bt1) + JLArrays.@allowscalar begin jl_bt1″ = @inferred BlockTensorMap(jl_bt1′, W) - end + end # still need some logic for copying to a BlockArray of 
StridedViews @test jl_bt1 ≈ jl_bt1″ end end From b18de454fa30e9b045082ea55fd4284c98b9c256 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Mon, 2 Mar 2026 09:03:53 -0500 Subject: [PATCH 4/8] Support BlockArrays in KernelAbstractions bcasting with new extension --- Project.toml | 4 ++++ ext/BlockTensorKitGPUArraysExt.jl | 11 +++++++++++ src/tensors/blocktensor.jl | 2 +- test/abstracttensor/blocktensor.jl | 6 ++---- 4 files changed, 18 insertions(+), 5 deletions(-) create mode 100644 ext/BlockTensorKitGPUArraysExt.jl diff --git a/Project.toml b/Project.toml index bc920f0..b0b6573 100644 --- a/Project.toml +++ b/Project.toml @@ -17,9 +17,11 @@ VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" [weakdeps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +GPUArrays = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" [extensions] BlockTensorKitAdaptExt = "Adapt" +BlockTensorKitGPUArraysExt = "GPUArrays" [compat] Adapt = "4" @@ -27,6 +29,7 @@ Aqua = "0.8" BlockArrays = "1" Combinatorics = "1" Compat = "4.13" +GPUArrays = "11.4.1" JLArrays = "0.3" LinearAlgebra = "1" MatrixAlgebraKit = "0.6" @@ -45,6 +48,7 @@ julia = "1.10" Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" +GPUArrays = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7" JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f" diff --git a/ext/BlockTensorKitGPUArraysExt.jl b/ext/BlockTensorKitGPUArraysExt.jl new file mode 100644 index 0000000..ff5f217 --- /dev/null +++ b/ext/BlockTensorKitGPUArraysExt.jl @@ -0,0 +1,11 @@ +module BlockTensorKitGPUArraysExt + +using BlockTensorKit, BlockArrays, GPUArrays, Strided +using Strided: StridedViews +using GPUArrays: KernelAbstractions + +function KernelAbstractions.get_backend(BA::BlockArrays.BlockArray{T, N, A}) where {T, N, A <: AbstractArray{<:StridedView{T, N, <:AnyGPUArray}}} + return 
KernelAbstractions.get_backend(first(BA.blocks)) +end + +end diff --git a/src/tensors/blocktensor.jl b/src/tensors/blocktensor.jl index 962f6ce..ce351ba 100644 --- a/src/tensors/blocktensor.jl +++ b/src/tensors/blocktensor.jl @@ -114,7 +114,7 @@ function BlockTensorMap(t::AbstractTensorMap, space::TensorMapSumSpace) TT = tensormaptype(spacetype(t), numout(t), numin(t), storagetype(t)) tdst = BlockTensorMap{TT}(undef, space) for (f₁, f₂) in fusiontrees(tdst) - tdst[f₁, f₂] .= t[f₁, f₂] + copy!(tdst[f₁, f₂], t[f₁, f₂]) end return tdst end diff --git a/test/abstracttensor/blocktensor.jl b/test/abstracttensor/blocktensor.jl index 280588a..60f035a 100644 --- a/test/abstracttensor/blocktensor.jl +++ b/test/abstracttensor/blocktensor.jl @@ -5,7 +5,7 @@ using BlockTensorKit using Random using Combinatorics using Adapt -using Strided, JLArrays +using JLArrays Vtr = ( SumSpace(ℂ^3), @@ -87,9 +87,7 @@ end jl_bt1 = rand(JLVector{T}, W) TT = TensorKit.TensorMap{T, spacetype(t1′), numout(t1′), numin(t1′), JLVector{T}} jl_bt1′ = @constinferred convert(TT, jl_bt1) - JLArrays.@allowscalar begin - jl_bt1″ = @inferred BlockTensorMap(jl_bt1′, W) - end # still need some logic for copying to a BlockArray of StridedViews + jl_bt1″ = @inferred BlockTensorMap(jl_bt1′, W) @test jl_bt1 ≈ jl_bt1″ end end From 47941cd0b9348f73b9841c8e43873c7a367fed37 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Tue, 3 Mar 2026 04:37:47 -0500 Subject: [PATCH 5/8] Add test for different eltype --- test/abstracttensor/blocktensor.jl | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/abstracttensor/blocktensor.jl b/test/abstracttensor/blocktensor.jl index 60f035a..f5cc5d8 100644 --- a/test/abstracttensor/blocktensor.jl +++ b/test/abstracttensor/blocktensor.jl @@ -89,6 +89,18 @@ end jl_bt1′ = @constinferred convert(TT, jl_bt1) jl_bt1″ = @inferred BlockTensorMap(jl_bt1′, W) @test jl_bt1 ≈ jl_bt1″ + # test conversion to TensorMap with a different element type + t1 = rand(ComplexF32, W) + t2 
= rand(ComplexF32, W) + t1′ = @constinferred convert(TensorMap, t1) + t2′ = @constinferred convert(TensorMap, t2) + @test norm(t1) ≈ norm(t1′) + @test norm(t2) ≈ norm(t2′) + @test inner(t1, t2) ≈ inner(t1′, t2′) + t1″ = @inferred BlockTensorMap(t1′, W) + t2″ = @inferred BlockTensorMap(t2′, W) + @test t1 ≈ t1″ + @test t2 ≈ t2″ end end From 797946cc5d5226b6bbeff5ffb26e1e54fc84b43c Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Tue, 3 Mar 2026 05:36:30 -0500 Subject: [PATCH 6/8] Update Project.toml --- Project.toml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/Project.toml b/Project.toml index b0b6573..6f97718 100644 --- a/Project.toml +++ b/Project.toml @@ -35,7 +35,7 @@ LinearAlgebra = "1" MatrixAlgebraKit = "0.6" Random = "1" SafeTestsets = "0.1" -Strided = "2" +Strided = "2.3.3" TensorKit = "0.16.1" TensorOperations = "5" Test = "1" @@ -57,6 +57,3 @@ TestExtras = "5ed8adda-3752-4e41-b88a-e8b09835ee3a" [targets] test = ["Test", "TestExtras", "Random", "Combinatorics", "SafeTestsets", "Aqua", "Adapt", "JLArrays"] - -[sources] -Strided = {url = "https://github.com/QuantumKitHub/Strided.jl", rev = "ksh/jlarrays"} From 5db33c58caf15859fc86f8afb7888cefd85c2eaf Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Tue, 3 Mar 2026 11:38:54 +0100 Subject: [PATCH 7/8] Update src/tensors/tensoroperations.jl Co-authored-by: Lukas Devos --- src/tensors/tensoroperations.jl | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/tensors/tensoroperations.jl b/src/tensors/tensoroperations.jl index 1859af2..80bb85a 100644 --- a/src/tensors/tensoroperations.jl +++ b/src/tensors/tensoroperations.jl @@ -15,11 +15,8 @@ function TO.tensoradd_type(TC, A::AdjointBlockTensorMap, pA::Index2Tuple, conjA: return TO.tensoradd_type(TC, A', adjointtensorindices(A, pA), !conjA) end -# copy blocks back to CPU/collect them into an array -# seems necessary for GPU-backed BlockTensorMaps but -# maybe not the most efficient approach? 
function TO.tensorscalar(t::AbstractBlockTensorMap{T, S, 0, 0}) where {T, S} - return prod(TO.tensorscalar, nonzero_values(t)) + return nonzero_length(t) == 0 ? zero(T) : TO.tensorscalar(only(nonzero_values(t))) end # tensoralloc_contract From 93da47df5b355d56fb5b3874695aefcaad557177 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Tue, 3 Mar 2026 07:59:10 -0500 Subject: [PATCH 8/8] Move element type test out --- test/abstracttensor/blocktensor.jl | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/test/abstracttensor/blocktensor.jl b/test/abstracttensor/blocktensor.jl index f5cc5d8..b9ff596 100644 --- a/test/abstracttensor/blocktensor.jl +++ b/test/abstracttensor/blocktensor.jl @@ -89,19 +89,14 @@ end jl_bt1′ = @constinferred convert(TT, jl_bt1) jl_bt1″ = @inferred BlockTensorMap(jl_bt1′, W) @test jl_bt1 ≈ jl_bt1″ - # test conversion to TensorMap with a different element type - t1 = rand(ComplexF32, W) - t2 = rand(ComplexF32, W) - t1′ = @constinferred convert(TensorMap, t1) - t2′ = @constinferred convert(TensorMap, t2) - @test norm(t1) ≈ norm(t1′) - @test norm(t2) ≈ norm(t2′) - @test inner(t1, t2) ≈ inner(t1′, t2′) - t1″ = @inferred BlockTensorMap(t1′, W) - t2″ = @inferred BlockTensorMap(t2′, W) - @test t1 ≈ t1″ - @test t2 ≈ t2″ end + # test conversion to TensorMap with a different element type + t1 = rand(ComplexF32, W) + TT = TensorKit.TensorMap{ComplexF64, spacetype(t1), numout(t1), numin(t1), Vector{ComplexF64}} + t1′ = @constinferred convert(TT, t1) + @test norm(t1) ≈ norm(t1′) + t1″ = @inferred BlockTensorMap(t1′, W) + @test t1 ≈ t1″ end @testset "Adapt" begin