-
Notifications
You must be signed in to change notification settings - Fork 17
Expand file tree
/
Copy pathscript.py
More file actions
104 lines (86 loc) · 3.21 KB
/
script.py
File metadata and controls
104 lines (86 loc) · 3.21 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
import os
import sys
import tarfile
import tempfile
import zipfile
import anndata as ad
import torch
from CellPLM.pipeline.cell_embedding import CellEmbeddingPipeline
from CellPLM.utils import set_seed
## VIASH START
# Note: this section is auto-generated by viash at runtime. To edit it, make changes
# in config.vsh.yaml and then run `viash config inject config.vsh.yaml`.
par = {
    "input": "resources_test/.../input.h5ad",
    "output": "output.h5ad",
    "model": "20231027_85M",
    # Read below when constructing CellEmbeddingPipeline; the placeholder
    # previously omitted it, so running with these debug defaults raised
    # KeyError at pipeline construction.
    "model_name": "20231027_85M",
}
meta = {
    "name": "cellplm",
    # Read below by sys.path.append(); previously missing from the
    # placeholder, so the debug defaults raised KeyError immediately.
    "resources_dir": ".",
}
## VIASH END
# Make the Viash-bundled helper modules importable.
sys.path.append(meta["resources_dir"])
from exit_codes import exit_non_applicable
from read_anndata_partial import read_anndata
# Pin the RNG seed so embeddings are reproducible across runs.
set_seed(24)

# CellPLM checkpoints need a GPU to load; warn (but continue) on CPU so the
# failure mode is explained up front.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
    import warnings

    warnings.warn("Loading CellPLM models requires a GPU, this run will fail")

print("\n>>> Reading input files...", flush=True)
print(f"Input H5AD file: '{par['input']}'", flush=True)
# Load counts into .X along with the obs/var/uns slots used downstream.
adata = read_anndata(
    par["input"], X="layers/counts", obs="obs", var="var", uns="uns"
)

# The pretrained model is human-only; exit gracefully for other organisms.
organism = adata.uns["dataset_organism"]
if organism != "homo_sapiens":
    exit_non_applicable(
        f"CellPLM can only be used with human data "
        f'(dataset_organism == "{organism}")'
    )
print(adata, flush=True)
print("\n>>> Getting model files...", flush=True)
# Checkpoints available from
# https://www.dropbox.com/scl/fo/i5rmxgtqzg7iykt2e9uqm/h/ckpt?dl=0&subfolder_nav_tracking=1
if os.path.isdir(par["model"]):
    # Already an extracted directory; no temporary directory to clean up later.
    model_temp = None
    model_dir = par["model"]
else:
    # Archive input: extract into a temporary directory (cleaned up at the end
    # of the script via model_temp.cleanup()).
    model_temp = tempfile.TemporaryDirectory()
    model_dir = model_temp.name

    if zipfile.is_zipfile(par["model"]):
        print("Extracting CellPLM models from .zip...", flush=True)
        with zipfile.ZipFile(par["model"], "r") as zip_file:
            zip_file.extractall(model_dir)
    elif tarfile.is_tarfile(par["model"]) and par["model"].endswith(".tar.gz"):
        print("Extracting CellPLM models from .tar.gz...", flush=True)
        # NOTE(review): extractall() on an untrusted tarball is vulnerable to
        # path traversal; pass filter="data" once Python >= 3.12 is guaranteed.
        with tarfile.open(par["model"], "r:gz") as tar_file:
            tar_file.extractall(model_dir)
        # The tarball wraps its contents in a single top-level directory —
        # descend into it so model_dir points at the checkpoint files.
        model_dir = os.path.join(model_dir, os.listdir(model_dir)[0])
    else:
        raise ValueError(
            "The 'model' argument should be a directory, a .zip file or a .tar.gz file"
        )
print(f"Model directory: '{model_dir}'", flush=True)
print("\n>>> Creating embedding model pipeline...", flush=True)
# Load the pretrained checkpoint named by 'model_name' from the resolved dir.
pipeline = CellEmbeddingPipeline(
    pretrain_prefix=par["model_name"],
    pretrain_directory=model_dir,
)

print("\n>>> Embedding data...", flush=True)
# Run inference, then move the result off the device into a NumPy array.
embedding = pipeline.predict(adata, device=device).cpu().numpy()
print("\n>>> Storing output...", flush=True)
# Required output metadata: dataset/normalization identifiers plus this
# component's method id.
output_uns = {
    "dataset_id": adata.uns["dataset_id"],
    "normalization_id": adata.uns["normalization_id"],
    "method_id": meta["name"],
}
# Keep only the obs/var indexes (empty column selections) and the embedding.
output = ad.AnnData(
    obs=adata.obs[[]],
    var=adata.var[[]],
    obsm={
        "X_emb": embedding,
    },
    uns=output_uns,
)
print(output)

print("\n>>> Writing output to file...", flush=True)
print(f"Output H5AD file: '{par['output']}'", flush=True)
output.write_h5ad(par["output"], compression="gzip")

# Remove the extracted model files if an archive was unpacked earlier.
if model_temp is not None:
    print("\n>>> Cleaning up temporary directories...", flush=True)
    model_temp.cleanup()

print("\n>>> Done!", flush=True)