
Conversation


@nHackel nHackel commented Jan 23, 2026

This PR adds direct constructors for the sparse matrix types defined in GPUArrays.jl. More precisely, it adds generic functions that backends can define methods for; see also #677.

Changes:

  • Introduced a generic function for backends to define constructors for sparse formats.
  • Added direct constructors for sparse matrices in a specified format (a usage sketch follows at the end of this description).
  • Added a GPUSparseMatrix convenience constructor that defaults to GPUSparseMatrixCOO, which in turn falls back to SparseArrays.sparse for completeness.
  • Added tests for CSC and CSR, which can be extended to the other formats. I was unsure whether the constructor tests should also assert behaviour; so far I've only checked for the appropriate array types and sizes.

There is currently no COO or BSR implementation for JLArrays. I could add those as well; for BSR at least, I'd first need to look into the format or adapt an implementation, perhaps from CUDA.jl.
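For reference, here is a rough usage sketch of the direct CSC constructor (not part of the PR itself). It assumes `GPUSparseMatrixCSC` is the generic constructor exposed by GPUArrays, uses JLArrays as the backend, and the vector contents are made up for illustration; the call matches the method signatures in the diff below.

```julia
using SparseArrays
using GPUArrays, JLArrays

# 3x3 matrix with one stored value per column (a diagonal), in CSC layout.
colptr = JLArray(Int32[1, 2, 3, 4])       # n + 1 column pointers
rowval = JLArray(Int32[1, 2, 3])          # row index of each stored value
nzval  = JLArray(Float32[1.0, 2.0, 3.0])  # stored values

A = GPUSparseMatrixCSC(colptr, rowval, nzval, (3, 3))
@assert size(A) == (3, 3)
```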

@github-actions
Contributor

Your PR requires formatting changes to meet the project's style guidelines.
Please consider running Runic (git runic master) to apply these changes.

Suggested changes:
diff --git a/lib/JLArrays/src/JLArrays.jl b/lib/JLArrays/src/JLArrays.jl
index 3bd3571..b76a237 100644
--- a/lib/JLArrays/src/JLArrays.jl
+++ b/lib/JLArrays/src/JLArrays.jl
@@ -150,7 +150,7 @@ mutable struct JLSparseMatrixCSC{Tv, Ti} <: GPUArrays.AbstractGPUSparseMatrixCSC
         new{Tv, Ti}(colPtr, rowVal, nzVal, dims, length(nzVal))
     end
 end
-function GPUSparseMatrixCSC(colPtr::JLArray{Ti, 1}, rowVal::JLArray{Ti, 1}, nzVal::JLArray{Tv, 1}, dims::NTuple{2,<:Integer}) where {Tv, Ti <: Integer}
+function GPUSparseMatrixCSC(colPtr::JLArray{Ti, 1}, rowVal::JLArray{Ti, 1}, nzVal::JLArray{Tv, 1}, dims::NTuple{2, <:Integer}) where {Tv, Ti <: Integer}
     return JLSparseMatrixCSC(colPtr, rowVal, nzVal, dims)
 end
 function JLSparseMatrixCSC(colPtr::JLArray{Ti, 1}, rowVal::JLArray{Ti, 1}, nzVal::JLArray{Tv, 1}, dims::NTuple{2,<:Integer}) where {Tv, Ti <: Integer}
@@ -184,7 +184,7 @@ end
 function JLSparseMatrixCSR(rowPtr::JLArray{Ti, 1}, colVal::JLArray{Ti, 1}, nzVal::JLArray{Tv, 1}, dims::NTuple{2,<:Integer}) where {Tv, Ti <: Integer}
     return JLSparseMatrixCSR{Tv, Ti}(rowPtr, colVal, nzVal, dims)
 end
-function GPUSparseMatrixCSR(rowPtr::JLArray{Ti, 1}, colVal::JLArray{Ti, 1}, nzVal::JLArray{Tv, 1}, dims::NTuple{2,<:Integer}) where {Tv, Ti <: Integer}
+function GPUSparseMatrixCSR(rowPtr::JLArray{Ti, 1}, colVal::JLArray{Ti, 1}, nzVal::JLArray{Tv, 1}, dims::NTuple{2, <:Integer}) where {Tv, Ti <: Integer}
     return JLSparseMatrixCSR(rowPtr, colVal, nzVal, dims)
 end
 function SparseArrays.SparseMatrixCSC(x::JLSparseMatrixCSR) 
diff --git a/test/testsuite/sparse.jl b/test/testsuite/sparse.jl
index 146801e..ecfee75 100644
--- a/test/testsuite/sparse.jl
+++ b/test/testsuite/sparse.jl
@@ -154,11 +154,11 @@ end
 
 # Helper function to derive direct matrix formats:
 # Create colptr, rowval, nzval for m x n matrix with 3 values per column
-function csc_vectors(m::Int, n::Int, ::Type{ET}; I::Type{<:Integer}=Int32) where {ET}
+function csc_vectors(m::Int, n::Int, ::Type{ET}; I::Type{<:Integer} = Int32) where {ET}
     # Fixed, deterministic 3 nnz per column; random nz values
     colptr = Vector{I}(undef, n + 1)
     rowval = Vector{I}()
-    nzval  = Vector{ET}()
+    nzval = Vector{ET}()
 
     colptr[1] = I(1)
     nnz_acc = 0
@@ -172,29 +172,29 @@ function csc_vectors(m::Int, n::Int, ::Type{ET}; I::Type{<:Integer}=Int32) where
     end
     return colptr, rowval, nzval
 end
-function csr_vectors(m::Int, n::Int, ::Type{ET}; I::Type{<:Integer}=Int32) where {ET}
+function csr_vectors(m::Int, n::Int, ::Type{ET}; I::Type{<:Integer} = Int32) where {ET}
     # Build CSC for (n, m), then interpret as CSR for (m, n)
-    colptr_nm, rowval_nm, nzval_nm = csc_vectors(n, m, ET; I=I)
+    colptr_nm, rowval_nm, nzval_nm = csc_vectors(n, m, ET; I = I)
     rowptr = colptr_nm
     colind = rowval_nm
-    nzval  = nzval_nm
+    nzval = nzval_nm
     return rowptr, colind, nzval
 end
 # Construct appropriate sparse arrays
-function construct_sparse_matrix(AT::Type{<:GPUArrays.AbstractGPUSparseMatrixCSC}, ::Type{ET}, m::Int, n::Int; I::Type{<:Integer}=Int32) where {ET}
-    colptr, rowval, nzval = csc_vectors(m, n, ET; I=I)
+function construct_sparse_matrix(AT::Type{<:GPUArrays.AbstractGPUSparseMatrixCSC}, ::Type{ET}, m::Int, n::Int; I::Type{<:Integer} = Int32) where {ET}
+    colptr, rowval, nzval = csc_vectors(m, n, ET; I = I)
     dense_AT = GPUArrays.dense_array_type(AT)
     d_colptr = dense_AT(colptr)
     d_rowval = dense_AT(rowval)
-    d_nzval  = dense_AT(nzval)
+    d_nzval = dense_AT(nzval)
     return GPUSparseMatrixCSC(d_colptr, d_rowval, d_nzval, (m, n))
 end
-function construct_sparse_matrix(AT::Type{<:GPUArrays.AbstractGPUSparseMatrixCSR}, ::Type{ET}, m::Int, n::Int; I::Type{<:Integer}=Int32) where {ET}
-    rowptr, colind, nzval = csr_vectors(m, n, ET; I=I)
+function construct_sparse_matrix(AT::Type{<:GPUArrays.AbstractGPUSparseMatrixCSR}, ::Type{ET}, m::Int, n::Int; I::Type{<:Integer} = Int32) where {ET}
+    rowptr, colind, nzval = csr_vectors(m, n, ET; I = I)
     dense_AT = GPUArrays.dense_array_type(AT)
     d_rowptr = dense_AT(rowptr)
     d_colind = dense_AT(colind)
-    d_nzval  = dense_AT(nzval)
+    d_nzval = dense_AT(nzval)
     return GPUSparseMatrixCSR(d_rowptr, d_colind, d_nzval, (m, n))
 end
 function direct_vector_construction(AT::Type{<:GPUArrays.AbstractGPUSparseMatrix}, eltypes)
@@ -205,6 +205,7 @@ function direct_vector_construction(AT::Type{<:GPUArrays.AbstractGPUSparseMatrix
         @test x isa AT{ET}
         @test size(x) == (m, n)
     end
+    return
 end
 function direct_vector_construction(AT, eltypes)
     # NOP
