From 37888e49a28c8a1859c81c39e8db14ceee438d84 Mon Sep 17 00:00:00 2001
From: Katharine Hyatt
Date: Wed, 29 Apr 2026 10:38:50 -0400
Subject: [PATCH 1/2] Incremental fixes for MPSKit

---
 Project.toml                      |  2 +-
 ext/BlockTensorKitGPUArraysExt.jl | 11 +++++++++++
 src/linalg/factorizations.jl      | 10 ++++------
 src/tensors/blocktensor.jl        |  4 ++++
 src/tensors/sparseblocktensor.jl  |  4 ++++
 src/tensors/tensoroperations.jl   |  5 ++++-
 6 files changed, 28 insertions(+), 8 deletions(-)

diff --git a/Project.toml b/Project.toml
index c0c0443..26ceddb 100644
--- a/Project.toml
+++ b/Project.toml
@@ -36,7 +36,7 @@ MatrixAlgebraKit = "0.6"
 Random = "1"
 SafeTestsets = "0.1"
 Strided = "2.3.3"
-TensorKit = "0.16.4"
+TensorKit = "0.17"
 TensorOperations = "5"
 Test = "1"
 TestExtras = "0.2, 0.3"
diff --git a/ext/BlockTensorKitGPUArraysExt.jl b/ext/BlockTensorKitGPUArraysExt.jl
index ff5f217..2d220c0 100644
--- a/ext/BlockTensorKitGPUArraysExt.jl
+++ b/ext/BlockTensorKitGPUArraysExt.jl
@@ -8,4 +8,15 @@ function KernelAbstractions.get_backend(BA::BlockArrays.BlockArray{T, N, A}) whe
     return KernelAbstractions.get_backend(first(BA.blocks))
 end
 
+function Base.copyto!(dest::BM, src::TA) where {T <: Number, TA <: AnyGPUMatrix{T}, BM <: BlockMatrix{T, Matrix{TA}}}
+    # TODO -- should we use Threads here to parallelize these
+    # transfers in streams if possible?
+    for block_index in Iterators.product(blockaxes(dest)...)
+        indices = getindex.(axes(dest), block_index)
+        dest_view = @view dest[block_index...]
+        dest_view .= src[indices...]
+    end
+    return dest
+end
+
 end
diff --git a/src/linalg/factorizations.jl b/src/linalg/factorizations.jl
index 5d49f65..dffa40e 100644
--- a/src/linalg/factorizations.jl
+++ b/src/linalg/factorizations.jl
@@ -25,11 +25,10 @@ for f! in (
     )
     @eval function MAK.$f!(t::AbstractBlockTensorMap, F, alg::AbstractAlgorithm)
         TensorKit.foreachblock(t, F...) do _, (tblock, Fblocks...)
-            dense_block = similar_dense(tblock)
-            Fblocks′ = MAK.$f!(copy_dense!(dense_block, tblock), alg)
+            Fblocks′ = MAK.$f!(copy_dense!(similar_dense(tblock), tblock), alg)
             # deal with the case where the output is not in-place
             for (b′, b) in zip(Fblocks′, Fblocks)
-                b === b′ || copy!(b, b′)
+                b === b′ || copyto!(b, b′)
             end
             return nothing
         end
@@ -45,10 +44,9 @@ for f! in (
     )
     @eval function MAK.$f!(t::AbstractBlockTensorMap, N, alg::AbstractAlgorithm)
         TensorKit.foreachblock(t, N) do _, (tblock, Nblock)
-            dense_block = similar_dense(tblock)
-            Nblock′ = MAK.$f!(copy_dense!(dense_block, tblock), alg)
+            Nblock′ = MAK.$f!(copy_dense!(similar_dense(tblock), tblock), alg)
             # deal with the case where the output is not the same as the input
-            Nblock === Nblock′ || copy!(Nblock, Nblock′)
+            Nblock === Nblock′ || copyto!(Nblock, Nblock′)
             return nothing
         end
         return N
diff --git a/src/tensors/blocktensor.jl b/src/tensors/blocktensor.jl
index 1ee6fa5..97fd0df 100644
--- a/src/tensors/blocktensor.jl
+++ b/src/tensors/blocktensor.jl
@@ -161,6 +161,10 @@ end
 Base.eltype(::Type{<:BlockTensorMap{TT}}) where {TT} = TT
 Base.parent(t::BlockTensorMap) = t.data
 
+# handle this separately because the storagetype of `AbstractTensorMap` is
+# *always* Vector no matter the actual data storage type
+TK.storagetype(t::BlockTensorMap{AbstractTensorMap{E, S, N₁, N₂}}) where {E, S, N₁, N₂} = TK.promote_storagetype(values(t.data)...)
+
 function Base.copyto!(
         dest::BlockTensorMap, Rdest::CartesianIndices,
         src::BlockTensorMap, Rsrc::CartesianIndices,
diff --git a/src/tensors/sparseblocktensor.jl b/src/tensors/sparseblocktensor.jl
index 9756824..a170b82 100644
--- a/src/tensors/sparseblocktensor.jl
+++ b/src/tensors/sparseblocktensor.jl
@@ -168,6 +168,10 @@ VI.scalartype(::Type{<:SparseBlockTensorMap{TT}}) where {TT} = scalartype(TT)
 Base.parent(t::SparseBlockTensorMap) = SparseTensorArray(t.data, space(t))
 Base.eltype(::Type{<:SparseBlockTensorMap{TT}}) where {TT} = TT
 
+# handle this separately because the storagetype of `AbstractTensorMap` is
+# *always* Vector no matter the actual data storage type
+TK.storagetype(t::SparseBlockTensorMap{AbstractTensorMap{E, S, N₁, N₂}}) where {E, S, N₁, N₂} = TK.promote_storagetype(values(t.data)...)
+
 issparse(::SparseBlockTensorMap) = true
 nonzero_keys(t::SparseBlockTensorMap) = keys(t.data)
 nonzero_values(t::SparseBlockTensorMap) = values(t.data)
diff --git a/src/tensors/tensoroperations.jl b/src/tensors/tensoroperations.jl
index 80bb85a..3e5e766 100644
--- a/src/tensors/tensoroperations.jl
+++ b/src/tensors/tensoroperations.jl
@@ -31,7 +31,10 @@ for TTA in (:AbstractTensorMap, :AbstractBlockTensorMap), TTB in (:AbstractTenso
     ) where {N₁, N₂}
         S = TK.check_spacetype(A, B)
         TC′ = TK.promote_permute(TC, sectortype(S))
-        M = TK.promote_storagetype(TK.similarstoragetype(A, TC′), TK.similarstoragetype(B, TC′))
+        # explicitly compute storagetype here to work around eltype of AbstractTensorMap
+        MA = TK.similarstoragetype(TK.storagetype(A), TC′)
+        MB = TK.similarstoragetype(TK.storagetype(B), TC′)
+        M = TK.promote_storagetype(MA, MB)
         return if issparse(A) && issparse(B)
             sparseblocktensormaptype(S, N₁, N₂, M)
         else

From 25bc98894573d026e09378f4c6c27543cc56de15 Mon Sep 17 00:00:00 2001
From: Katharine Hyatt
Date: Thu, 7 May 2026 07:43:55 -0400
Subject: [PATCH 2/2] Use sources again

---
 Project.toml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Project.toml b/Project.toml
index 26ceddb..e2bc95d 100644
--- a/Project.toml
+++ b/Project.toml
@@ -57,3 +57,6 @@ TestExtras = "5ed8adda-3752-4e41-b88a-e8b09835ee3a"
 
 [targets]
 test = ["Test", "TestExtras", "Random", "Combinatorics", "SafeTestsets", "Aqua", "Adapt", "JLArrays"]
+
+[sources]
+TensorKit = {url = "https://github.com/quantumkithub/tensorkit.jl", rev = "main"}
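
A quick usage sketch for the new `Base.copyto!` method in
BlockTensorKitGPUArraysExt (not part of the patches; illustration only). It
assumes that loading BlockTensorKit alongside a GPUArrays-backed package
activates the extension, and that `AnyGPUMatrix` is the GPUArrays wrapper
union, so the CPU-backed test dependency JLArrays matches the `TA` parameter:

    using BlockArrays, JLArrays
    using BlockTensorKit  # with JLArrays loaded, this should trigger the extension

    # assemble a 4x4 blocked destination from four 2x2 JLArray blocks
    dest = mortar([JLArray(zeros(Float32, 2, 2)) for _ in 1:2, _ in 1:2])
    src = JLArray(rand(Float32, 4, 4))  # one contiguous "device" matrix

    copyto!(dest, src)  # scatter src into dest, one block at a time

    # block (1, 2) should now hold rows 1:2, columns 3:4 of src
    @assert Array(view(dest, Block(1, 2))) == Array(src[1:2, 3:4])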