Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ MatrixAlgebraKit = "0.6"
Random = "1"
SafeTestsets = "0.1"
Strided = "2.3.3"
TensorKit = "0.16.4"
TensorKit = "0.17"
TensorOperations = "5"
Test = "1"
TestExtras = "0.2, 0.3"
Expand All @@ -57,3 +57,6 @@ TestExtras = "5ed8adda-3752-4e41-b88a-e8b09835ee3a"

[targets]
test = ["Test", "TestExtras", "Random", "Combinatorics", "SafeTestsets", "Aqua", "Adapt", "JLArrays"]

[sources]
TensorKit = {url = "https://github.com/QuantumKitHub/TensorKit.jl", rev = "main"}
11 changes: 11 additions & 0 deletions ext/BlockTensorKitGPUArraysExt.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,4 +8,15 @@ function KernelAbstractions.get_backend(BA::BlockArrays.BlockArray{T, N, A}) whe
return KernelAbstractions.get_backend(first(BA.blocks))
end

function Base.copyto!(dest::BM, src::TA) where {T <: Number, TA <: AnyGPUMatrix{T}, BM <: BlockMatrix{T, Matrix{TA}}}
    # Fill a blocked destination from a dense GPU matrix, one block at a time:
    # each block view of `dest` receives the matching index range of `src`.
    # TODO -- should we use Threads here to parallelize these
    # transfers in streams if possible?
    for bidx in Iterators.product(blockaxes(dest)...)
        # Parent-axis index ranges covered by this block in each dimension.
        src_ranges = map(getindex, axes(dest), bidx)
        block_view = view(dest, bidx...)
        block_view .= src[src_ranges...]
    end
    return dest
end

end
10 changes: 4 additions & 6 deletions src/linalg/factorizations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,10 @@ for f! in (
)
@eval function MAK.$f!(t::AbstractBlockTensorMap, F, alg::AbstractAlgorithm)
TensorKit.foreachblock(t, F...) do _, (tblock, Fblocks...)
dense_block = similar_dense(tblock)
Fblocks′ = MAK.$f!(copy_dense!(dense_block, tblock), alg)
Fblocks′ = MAK.$f!(copy_dense!(similar_dense(tblock), tblock), alg)
# deal with the case where the output is not in-place
for (b′, b) in zip(Fblocks′, Fblocks)
b === b′ || copy!(b, b′)
b === b′ || copyto!(b, b′)
end
return nothing
end
Expand All @@ -45,10 +44,9 @@ for f! in (
)
@eval function MAK.$f!(t::AbstractBlockTensorMap, N, alg::AbstractAlgorithm)
TensorKit.foreachblock(t, N) do _, (tblock, Nblock)
dense_block = similar_dense(tblock)
Nblock′ = MAK.$f!(copy_dense!(dense_block, tblock), alg)
Nblock′ = MAK.$f!(copy_dense!(similar_dense(tblock), tblock), alg)
# deal with the case where the output is not the same as the input
Nblock === Nblock′ || copy!(Nblock, Nblock′)
Nblock === Nblock′ || copyto!(Nblock, Nblock′)
return nothing
end
return N
Expand Down
4 changes: 4 additions & 0 deletions src/tensors/blocktensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,10 @@ end
# The element type of a `BlockTensorMap` is its tensor type parameter `TT`:
# entries of the block tensor are themselves tensor maps, not scalars.
Base.eltype(::Type{<:BlockTensorMap{TT}}) where {TT} = TT
# Underlying container holding the individual tensor blocks.
Base.parent(t::BlockTensorMap) = t.data

# handle this separately because the storagetype of `AbstractTensorMap` is
# *always* Vector no matter the actual data storage type; instead promote the
# storage types of the actual blocks held in `t.data`.
# NOTE(review): splats `values(t.data)` — presumably non-empty for a dense
# BlockTensorMap; confirm `promote_storagetype()` with zero args cannot be hit.
TK.storagetype(t::BlockTensorMap{AbstractTensorMap{E, S, N₁, N₂}}) where {E, S, N₁, N₂} = TK.promote_storagetype(values(t.data)...)

function Base.copyto!(
dest::BlockTensorMap, Rdest::CartesianIndices,
src::BlockTensorMap, Rsrc::CartesianIndices,
Expand Down
4 changes: 4 additions & 0 deletions src/tensors/sparseblocktensor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,10 @@ VI.scalartype(::Type{<:SparseBlockTensorMap{TT}}) where {TT} = scalartype(TT)
# Wrap the sparse block data (plus the space) in a `SparseTensorArray` view.
Base.parent(t::SparseBlockTensorMap) = SparseTensorArray(t.data, space(t))
# Entries of a `SparseBlockTensorMap` are tensor maps of type `TT`, not scalars.
Base.eltype(::Type{<:SparseBlockTensorMap{TT}}) where {TT} = TT

# handle this separately because the storagetype of `AbstractTensorMap` is
# *always* Vector no matter the actual data storage type; instead promote the
# storage types of the stored (nonzero) blocks.
# NOTE(review): splats `values(t.data)` — for a sparse map with NO stored
# blocks this calls `promote_storagetype()` with zero arguments; verify that
# case is handled (or unreachable) upstream.
TK.storagetype(t::SparseBlockTensorMap{AbstractTensorMap{E, S, N₁, N₂}}) where {E, S, N₁, N₂} = TK.promote_storagetype(values(t.data)...)

issparse(::SparseBlockTensorMap) = true
# Iterate only the explicitly stored blocks (keys/values of the backing dict).
nonzero_keys(t::SparseBlockTensorMap) = keys(t.data)
nonzero_values(t::SparseBlockTensorMap) = values(t.data)
Expand Down
5 changes: 4 additions & 1 deletion src/tensors/tensoroperations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,10 @@ for TTA in (:AbstractTensorMap, :AbstractBlockTensorMap), TTB in (:AbstractTenso
) where {N₁, N₂}
S = TK.check_spacetype(A, B)
TC′ = TK.promote_permute(TC, sectortype(S))
M = TK.promote_storagetype(TK.similarstoragetype(A, TC′), TK.similarstoragetype(B, TC′))
# explicitly compute storagetype here to work around eltype of AbstractTensorMap
MA = TK.similarstoragetype(TK.storagetype(A), TC′)
MB = TK.similarstoragetype(TK.storagetype(B), TC′)
M = TK.promote_storagetype(MA, MB)
return if issparse(A) && issparse(B)
sparseblocktensormaptype(S, N₁, N₂, M)
else
Expand Down
Loading