-
Notifications
You must be signed in to change notification settings - Fork 17
Expand file tree
/
Copy pathscript.py
More file actions
87 lines (70 loc) · 2.55 KB
/
script.py
File metadata and controls
87 lines (70 loc) · 2.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import sys
import os
import anndata as ad
import scgpt
from czbenchmarks.datasets.single_cell import SingleCellDataset
from czbenchmarks.datasets.types import Organism, DataType
from czbenchmarks.models.types import ModelType
## VIASH START
# Note: this section is auto-generated by viash at runtime. To edit it, make changes
# in config.vsh.yaml and then run `viash config inject config.vsh.yaml`.
# `par` holds the component's runtime parameters (input/output H5AD paths);
# the placeholder values below are only used when running outside viash.
par = {
"input": "resources_test/.../input.h5ad",
"output": "output.h5ad",
}
meta = {"name": "scgpt_czbenchmarks"}
## VIASH END
# meta["resources_dir"] is injected by viash at runtime (not present in the
# placeholder dict above) — it points at the component's bundled resources,
# which provide the two helper modules imported next.
sys.path.append(meta["resources_dir"])
from read_anndata_partial import read_anndata
from exit_codes import exit_non_applicable
# The scGPT model wrapper is baked into the container image at /app.
sys.path.append("/app")
from model import ScGPT
print(f"====== scGPT version {scgpt.__version__} (czbenchmarks) ======", flush=True)

# Gate on organism: scGPT is a human-only model, so bail out early with a
# "non-applicable" exit code for any other dataset_organism. Only .uns is
# read here to avoid loading the full matrix just for this check.
adata_uns = read_anndata(par["input"], uns="uns")
organism = adata_uns.uns["dataset_organism"]
if organism != "homo_sapiens":
    exit_non_applicable(
        f"scGPT can only be used with human data "
        f"(dataset_organism == \"{organism}\")"
    )
del adata_uns
print("\n>>> Creating input dataset..", flush=True)
# Wrap the input H5AD in a czbenchmarks SingleCellDataset (organism is fixed
# to HUMAN — the non-human case was rejected above).
# PEP 8: no spaces around `=` in keyword arguments.
dataset = SingleCellDataset(path=par["input"], organism=Organism.HUMAN)
print(dataset)
dataset.load_data()
# scGPT expects raw counts in .X; the pipeline stores them in layers["counts"],
# so copy them over before running the model.
dataset.adata.X = dataset.adata.layers["counts"].copy()
print(dataset.adata, flush=True)
print("\n>>> Running scGPT..", flush=True)
model = ScGPT()
# Run these steps manually instead of using model.run() to avoid reloading data
print("Validating data...", flush=True)
dataset.validate()
model.validate_dataset(dataset)
print("Data validated successfully", flush=True)
print("Downloading model weights...", flush=True)
# exist_ok=True replaces the check-then-create pattern: it is race-free and
# a no-op when the directory already exists.
os.makedirs("/weights/human", exist_ok=True)
model.download_model_weights(dataset)
print("Model weights downloaded successfully", flush=True)
print("Running model...", flush=True)
model.run_model(dataset)
print("Model ran successfully", flush=True)
# Pull the cell embedding computed by run_model() out of the dataset's outputs.
embedding = dataset.get_output(ModelType.SCGPT, DataType.EMBEDDING)
print("\n>>> Storing output...", flush=True)
# Assemble a minimal result AnnData: keep only the obs/var indices (empty
# column selections), put the embedding in obsm["X_emb"], and carry over the
# identifiers the benchmark needs downstream.
embedding_slots = {
    "X_emb": embedding,
}
result_uns = {
    "dataset_id": dataset.adata.uns["dataset_id"],
    "normalization_id": dataset.adata.uns["normalization_id"],
    "method_id": meta["name"],
}
output = ad.AnnData(
    obs=dataset.adata.obs[[]],
    var=dataset.adata.var[[]],
    obsm=embedding_slots,
    uns=result_uns,
)
print(output)

print("\n>>> Writing output to file...", flush=True)
print(f"Output H5AD file: '{par['output']}'", flush=True)
output.write_h5ad(par["output"], compression="gzip")
print("\n>>> Done!", flush=True)