// AbstractCombineGeneCountsHandler.java (444 lines / 17.8 KB; scraped from a fork of BimberLab/DiscvrLabKeyModules)
package org.labkey.sequenceanalysis.analysis;
import au.com.bytecode.opencsv.CSVWriter;
import org.json.JSONObject;
import org.labkey.api.collections.CaseInsensitiveHashSet;
import org.labkey.api.module.ModuleLoader;
import org.labkey.api.pipeline.PipelineJob;
import org.labkey.api.pipeline.PipelineJobException;
import org.labkey.api.pipeline.RecordedAction;
import org.labkey.api.sequenceanalysis.SequenceAnalysisService;
import org.labkey.api.sequenceanalysis.SequenceOutputFile;
import org.labkey.api.sequenceanalysis.model.Readset;
import org.labkey.api.sequenceanalysis.pipeline.AbstractParameterizedOutputHandler;
import org.labkey.api.sequenceanalysis.pipeline.SequenceAnalysisJobSupport;
import org.labkey.api.sequenceanalysis.pipeline.SequenceOutputHandler;
import org.labkey.api.sequenceanalysis.pipeline.ToolParameterDescriptor;
import org.labkey.api.sequenceanalysis.run.GeneToNameTranslator;
import org.labkey.api.util.FileType;
import org.labkey.api.util.PageFlowUtil;
import org.labkey.api.writer.PrintWriters;
import org.labkey.sequenceanalysis.SequenceAnalysisModule;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
/**
 * Base output handler that merges per-sample gene count files (e.g. aligner count tables)
 * into a single combined gene-by-sample count matrix, plus a sample metadata table.
 * Concrete subclasses supply the parsing of their tool's count-file format via
 * {@link #processOutputFiles}.
 */
abstract public class AbstractCombineGeneCountsHandler extends AbstractParameterizedOutputHandler<SequenceOutputHandler.SequenceOutputProcessor>
{
    // Strandedness choices offered to the user when allowInferStranded is true
    protected static final String STRAND1 = "Strand1";
    protected static final String STRAND2 = "Strand2";
    protected static final String STRANDED = "Stranded";
    protected static final String UNSTRANDED = "Unstranded";
    protected static final String INFER = "Infer";

    // Choices for which identifier appears in each sample's column header
    protected static final String ReadsetId = "ReadsetId";
    protected static final String ReadsetName = "ReadsetName";
    protected static final String OutputFileId = "OutputFileId";

    // Summary rows emitted by counters (e.g. STAR) that are not real gene IDs and
    // therefore should not be reported as "genes without data"
    protected static final Set<String> OTHER_IDS = PageFlowUtil.set("N_ambiguous", "N_multimapping", "N_noFeature", "N_unmapped");

    // File type of the per-sample count files this handler accepts
    private final FileType _fileType;
    // Display name of the tool that produced the counts; used in the output category
    private final String _toolName;

    /**
     * @param name handler display name
     * @param description handler description shown in the UI
     * @param allowInferStranded whether to expose strandedness/threshold parameters
     * @param fileType the count-file type this handler can process
     * @param toolName the source tool's name, used to label the combined output
     */
    public AbstractCombineGeneCountsHandler(String name, String description, boolean allowInferStranded, FileType fileType, String toolName)
    {
        super(ModuleLoader.getInstance().getModule(SequenceAnalysisModule.class), name, description, new LinkedHashSet<>(List.of("LDK/field/SimpleCombo.js")), getParams(allowInferStranded));
        _fileType = fileType;
        _toolName = toolName;
    }
private static List<ToolParameterDescriptor> getParams(boolean allowInferStranded)
{
List<ToolParameterDescriptor> ret = new ArrayList<>();
ret.add(ToolParameterDescriptor.create("name", "Output Name", "This is the name that will be used to describe the output.", "textfield", new JSONObject()
{{
put("allowBlank", false);
}}, null));
ret.add(ToolParameterDescriptor.createExpDataParam("gtf", "GTF/GFF File", "The GTF/GFF file containing genes for this genome.", "sequenceanalysis-genomefileselectorfield", new JSONObject()
{{
put("extensions", Arrays.asList("gtf", "gff"));
put("width", 400);
put("allowBlank", false);
}}, null));
ret.add(ToolParameterDescriptor.create("skipGenesWithoutData", "Skip Genes Without Data", "If checked, the output table will omit any genes with zero read counts across all samples.", "checkbox", null, false));
ret.add(ToolParameterDescriptor.create("idInHeader", "Header Value", "Choose which value to use as the header/sample identifier", "ldk-simplecombo", new JSONObject(){{
put("storeValues", ReadsetId + ";" + ReadsetName + ";" + OutputFileId);
put("value", ReadsetId);
}}, ReadsetId));
if (allowInferStranded)
{
ret.add(ToolParameterDescriptor.create(STRANDED, "Strandedness", "Choose whether to treat these data as stranded, unstranded, or to have the script infer the strandedness", "ldk-simplecombo", new JSONObject()
{{
put("storeValues", STRAND1 + ";" + STRAND2 + ";" + UNSTRANDED + ";" + INFER);
put("value", INFER);
}}, INFER));
ret.add(ToolParameterDescriptor.create("strandedThreshold", "Strandedness Threshold", "If infer is selected, Choose whether to treat these data as stranded, unstranded, or to have the script infer the strandedness", "ldk-simplecombo", new JSONObject(){{
put("minValue", 0);
put("maxValue", 1);
put("decimalPrecision", 2);
}}, 0.9));
}
return ret;
}
@Override
public boolean canProcess(SequenceOutputFile o)
{
return o.getFile() != null && o.getLibrary_id() != null && _fileType.isType(o.getFile());
}
    // This handler does its work on the remote pipeline server, not the webserver.
    @Override
    public boolean doRunRemote()
    {
        return true;
    }

    @Override
    public boolean doRunLocal()
    {
        return false;
    }
@Override
public SequenceOutputProcessor getProcessor()
{
return new CombineStarGeneCountsHandler.Processor();
}
    // All inputs must be combined into one table, so the job is never split per-file.
    @Override
    public boolean doSplitJobs()
    {
        return false;
    }
    /**
     * Pipeline processor: validates inputs up front, then (remotely) writes the combined
     * gene count table plus a sample-info table. Non-static inner class because it calls
     * the enclosing handler's prepareFiles()/getName() and reads _toolName.
     */
    public class Processor implements SequenceOutputProcessor
    {
        /**
         * Pre-flight validation: requires an output name, a single shared reference genome
         * across all inputs, and unique per-sample header values; also caches the genome
         * and readset data needed on the remote server.
         */
        @Override
        public void init(JobContext ctx, List<SequenceOutputFile> inputFiles, List<RecordedAction> actions, List<SequenceOutputFile> outputsToCreate) throws UnsupportedOperationException, PipelineJobException
        {
            if (!ctx.getParams().has("name"))
            {
                throw new PipelineJobException("Must provide the name of the output");
            }

            Integer libraryId = null;
            // Case-insensitive so headers differing only in case are treated as duplicates
            Set<String> distinctHeaderValues = new CaseInsensitiveHashSet();
            for (SequenceOutputFile o : inputFiles)
            {
                if (o.getLibrary_id() != null)
                {
                    // Remember the first genome seen and require all others to match it
                    if (libraryId == null)
                    {
                        libraryId = o.getLibrary_id();
                    }

                    if (!libraryId.equals(o.getLibrary_id()))
                    {
                        throw new PipelineJobException("All samples must use the same reference genome");
                    }

                    // Cache the genome so it is available to the remote job
                    ctx.getSequenceSupport().cacheGenome(SequenceAnalysisService.get().getReferenceGenome(o.getLibrary_id(), ctx.getJob().getUser()));
                }
                else
                {
                    throw new PipelineJobException("No library Id provided for file: " + o.getRowid());
                }

                // NOTE(review): the fallback default here is OutputFileId, while the UI
                // parameter declares ReadsetId as its default - confirm which is intended.
                String idInHeader = ctx.getParams().optString("idInHeader", OutputFileId);
                String val = getHeaderValue(idInHeader, ctx.getSequenceSupport(), o);
                // Duplicate headers would make the combined table's columns ambiguous
                if (distinctHeaderValues.contains(val))
                {
                    throw new PipelineJobException("Duplicate values found for gene table headers. Value was: " + val + " using the field: " + idInHeader);
                }

                distinctHeaderValues.add(val);
            }
        }

        // No webserver-side work; everything happens remotely.
        @Override
        public void processFilesOnWebserver(PipelineJob job, SequenceAnalysisJobSupport support, List<SequenceOutputFile> inputFiles, JSONObject params, File outputDir, List<RecordedAction> actions, List<SequenceOutputFile> outputsToCreate) throws UnsupportedOperationException, PipelineJobException
        {
        }

        /**
         * Remote work: builds the combined count matrix via prepareFiles(), then writes a
         * tab-delimited sample-info table (one row per input, ordered by output-file rowId).
         */
        @Override
        public void processFilesRemote(List<SequenceOutputFile> inputFiles, JobContext ctx) throws UnsupportedOperationException, PipelineJobException
        {
            final String idInHeader = ctx.getParams().optString("idInHeader", OutputFileId);
            prepareFiles(ctx, inputFiles, getName(), new HeaderProvider()
            {
                @Override
                public String getHeader(JobContext ctx, SequenceOutputFile so)
                {
                    return getHeaderValue(idInHeader, ctx.getSequenceSupport(), so);
                }
            }, "Gene Count Table: " + _toolName);

            // Companion table describing each sample in the combined matrix
            File outFile = new File(ctx.getOutputDir(), ctx.getParams().getString("name") + ".sampleInfo.txt");
            try (CSVWriter writer = new CSVWriter(PrintWriters.getPrintWriter(outFile), '\t', CSVWriter.NO_QUOTE_CHARACTER))
            {
                writer.writeNext(new String[]{"RowId", "Name", "ReadsetId", "ReadsetName", "AnalysisId", "SubjectId"});

                // TreeMap so rows come out sorted by rowId, matching the count table's column order
                Map<Integer, SequenceOutputFile> outputFileMap = new TreeMap<>();
                for (SequenceOutputFile so : inputFiles)
                {
                    outputFileMap.put(so.getRowid(), so);
                }

                for (Integer rowId : outputFileMap.keySet())
                {
                    SequenceOutputFile so = outputFileMap.get(rowId);
                    // Readset may be absent; all readset-derived columns then render empty
                    Readset rs = so.getReadset() != null ? ctx.getSequenceSupport().getCachedReadset(so.getReadset()) : null;
                    writer.writeNext(new String[]{so.getRowid().toString(), so.getName(), appendIfNotNull(so.getReadset()), (rs == null ? "" : rs.getName()), appendIfNotNull(so.getAnalysis_id()), (rs == null ? "" : appendIfNotNull(rs.getSubjectId()))});
                }
            }
            catch (IOException e)
            {
                throw new PipelineJobException(e);
            }
        }
    }
private String appendIfNotNull(Object input)
{
return input == null ? "" : String.valueOf(input);
}
    /**
     * Core combining routine: reads the GTF/GFF gene model, delegates count-file parsing to
     * the subclass via processOutputFiles(), logs per-sample QC (non-zero gene counts vs. the
     * average), then writes the combined gene-by-sample table and registers it as a new
     * SequenceOutputFile. Also writes a side file listing genes with zero counts everywhere.
     *
     * @param ctx            job context (params, logger, output dir, cached data)
     * @param inputFiles     per-sample count files to merge
     * @param actionName     name recorded for the pipeline action
     * @param hp             supplies the column-header string for each sample
     * @param outputCategory category applied to the combined output file
     * @throws PipelineJobException on missing GTF, mixed genomes, or I/O failure
     */
    public void prepareFiles(JobContext ctx, List<SequenceOutputFile> inputFiles, String actionName, HeaderProvider hp, String outputCategory) throws PipelineJobException
    {
        PipelineJob job = ctx.getJob();
        JSONObject params = ctx.getParams();

        RecordedAction action = new RecordedAction(actionName);
        action.setStartTime(new Date());

        String name = params.getString("name");
        Boolean doSkipGenesWithoutData = params.optBoolean("skipGenesWithoutData", false);
        ctx.getLogger().debug("skip genes without data: " + doSkipGenesWithoutData);

        // optInt returns 0 when the parameter is absent, which we treat as "not provided"
        int gtf = params.optInt("gtf");
        if (gtf == 0)
        {
            throw new PipelineJobException("No GTF file provided");
        }

        File gtfFile = ctx.getSequenceSupport().getCachedData(gtf);
        if (gtfFile == null || !gtfFile.exists())
        {
            throw new PipelineJobException("Unable to find GTF/GFF file: " + gtfFile);
        }

        // Re-validate that all inputs share one genome (also checked in Processor.init)
        Set<Integer> genomeIds = new HashSet<>();
        inputFiles.forEach(x -> genomeIds.add(x.getLibrary_id()));
        if (genomeIds.size() > 1)
        {
            throw new PipelineJobException("All inputs must be from the same genome!");
        }

        action.addInput(gtfFile, "GTF/GFF file");
        job.getLogger().info("using GTF/GFF file: " + gtfFile.getPath());

        //first build a map of all geneIDs and other attributes
        job.getLogger().info("reading GTF/GFF file");
        GeneToNameTranslator translator = new GeneToNameTranslator(gtfFile, job.getLogger());

        // Subclass fills results.counts / distinctGenes / nonZeroCounts from its tool's files
        CountResults results = new CountResults(inputFiles.size());
        processOutputFiles(results, inputFiles, params, translator, job, action);

        job.getLogger().info("writing output. total genes: " + results.distinctGenes.size());

        // QC: compare each sample's count of non-zero genes against the cross-sample average.
        // NOTE(review): assumes processOutputFiles() populated nonZeroCounts for every input;
        // a missing entry would NPE here.
        double sumNonZero = 0.0;
        for (SequenceOutputFile so : inputFiles)
        {
            sumNonZero += results.nonZeroCounts.get(so.getRowid());
        }
        double avgNonZero = sumNonZero / (double)inputFiles.size();

        job.getLogger().info("total non-zero genes per sample (and ratio relative to avg)");
        job.getLogger().info("average: " + avgNonZero);
        for (SequenceOutputFile so : inputFiles)
        {
            long totalNonZero = results.nonZeroCounts.get(so.getRowid());
            double ratio = (double)totalNonZero / avgNonZero;
            job.getLogger().info(so.getRowid() + "/" + so.getName() + ": " + totalNonZero + " (" + ratio + " of avg)");
            // Flag outlier samples (more than 2-fold above or below the average)
            if (ratio > 2 || ratio < 0.5)
            {
                job.getLogger().warn("total non zero was more than 2-fold different than the average");
            }

            //TODO: consider a warn threshold based on total features?
        }

        // TreeMap orders sample columns by output-file rowId
        Map<Integer, SequenceOutputFile> outputFileMap = new TreeMap<>();
        for (SequenceOutputFile so : inputFiles)
        {
            outputFileMap.put(so.getRowid(), so);
        }

        File outputFile = new File(ctx.getOutputDir(), name + ".txt");
        try (CSVWriter writer = new CSVWriter(PrintWriters.getPrintWriter(outputFile), '\t', CSVWriter.NO_QUOTE_CHARACTER))
        {
            //header
            List<String> header = new ArrayList<>();
            header.add("GeneId");
            header.add("GeneName");
            header.add("GeneDescription");
            header.add("SamplesWithReads");
            for (Integer rowId : outputFileMap.keySet())
            {
                header.add(hp.getHeader(ctx, outputFileMap.get(rowId)));
            }
            writer.writeNext(header.toArray(new String[0]));

            Set<String> genesWithoutData = new TreeSet<>();
            for (String geneId : results.distinctGenes)
            {
                List<String> row = new ArrayList<>(inputFiles.size() + 3);
                // Only genes present in the GTF are written; unknown IDs are logged below
                if (translator.getGeneMap().containsKey(geneId))
                {
                    row.add(geneId);
                    row.add(translator.getGeneMap().get(geneId).get("gene_name"));
                    row.add(translator.getGeneMap().get(geneId).get("gene_description"));

                    // Build per-sample count cells, tracking how many samples have data
                    List<String> toAdd = new ArrayList<>();
                    int totalWithData = 0;
                    for (Integer rowId : outputFileMap.keySet())
                    {
                        Double count = results.counts.get(rowId).get(geneId);
                        if (count != null && count > 0)
                        {
                            totalWithData++;
                        }

                        // Missing entries are written as zero counts
                        toAdd.add(count == null ? "0" : count.toString());
                    }

                    // Optionally omit rows that are zero in every sample
                    if (totalWithData > 0 || !doSkipGenesWithoutData)
                    {
                        row.add(String.valueOf(totalWithData));
                        row.addAll(toAdd);
                        writer.writeNext(row.toArray(new String[0]));
                    }

                    // Track zero-everywhere genes, excluding counter summary rows (N_unmapped, etc.)
                    if (totalWithData == 0 && !OTHER_IDS.contains(geneId))
                    {
                        genesWithoutData.add(geneId);
                    }
                }
                else
                {
                    job.getLogger().error("gene not found in GTF: [" + geneId + "]");
                }
            }

            if (!genesWithoutData.isEmpty())
            {
                // Side file listing genes with no reads in any sample. Safe to look these up
                // in the gene map: only genes that passed the containsKey check above were added.
                File skippedGenes = new File(ctx.getOutputDir(), "genesWithoutData.txt");
                job.getLogger().info("writing list of the " + genesWithoutData.size() + " genes without data to: " + skippedGenes.getPath());
                try (PrintWriter errWriter = PrintWriters.getPrintWriter(skippedGenes))
                {
                    errWriter.write("GeneId\tGeneName\tGeneDescription\n");
                    for (String geneId : genesWithoutData)
                    {
                        errWriter.write(geneId + "\t" + (translator.getGeneMap().get(geneId).get("gene_name") == null ? "" : translator.getGeneMap().get(geneId).get("gene_name")) + "\t" + (translator.getGeneMap().get(geneId).get("gene_description") == null ? "" : translator.getGeneMap().get(geneId).get("gene_description")) + "\n");
                    }
                }
                catch (IOException e)
                {
                    throw new PipelineJobException(e);
                }

                ctx.getFileManager().addOutput(action, "Genes Without Data", skippedGenes);
            }
        }
        catch (IOException e)
        {
            throw new PipelineJobException(e);
        }

        // Register the combined table both as an action output and a new SequenceOutputFile
        action.addOutput(outputFile, outputCategory, false);

        SequenceOutputFile so = new SequenceOutputFile();
        so.setCategory(outputCategory);
        so.setFile(outputFile);
        so.setDescription("Total datasets: " + inputFiles.size());
        so.setName(params.getString("name"));
        // Safe: genomeIds was validated above to contain exactly one genome
        so.setLibrary_id(genomeIds.iterator().next());
        ctx.addSequenceOutput(so);

        action.setEndTime(new Date());
        ctx.addActions(action);
    }
    /**
     * Strategy for producing the column-header string of one sample in the combined table.
     */
    public interface HeaderProvider
    {
        // Returns the header value to use for the given output file's column
        String getHeader(JobContext ctx, SequenceOutputFile so);
    }
private static String getHeaderValue(String idInHeader, SequenceAnalysisJobSupport support, SequenceOutputFile so)
{
if (idInHeader.equals(ReadsetName))
{
Readset rs = support.getCachedReadset(so.getReadset());
if (rs == null)
{
throw new IllegalArgumentException("Readset not found for: " + so.getRowid());
}
return rs.getName();
}
else if (idInHeader.equals(ReadsetId))
{
Readset rs = support.getCachedReadset(so.getReadset());
if (rs == null)
{
throw new IllegalArgumentException("Readset not found for: " + so.getRowid());
}
return rs.getReadsetId().toString();
}
else
{
return so.getRowid().toString();
}
}
protected static class CountResults
{
Map<Integer, Map<String, Double>> counts;
Set<String> distinctGenes = new HashSet<>(5000);
Map<Integer, Long> nonZeroCounts;
public CountResults(int inputFilesSize)
{
nonZeroCounts = new HashMap<>(inputFilesSize);
}
}
    /**
     * Parses the tool-specific count files and populates {@code results} (counts,
     * distinctGenes, nonZeroCounts). Implemented by concrete subclasses per count-file format.
     */
    abstract protected void processOutputFiles(CountResults results, List<SequenceOutputFile> inputFiles, JSONObject params, GeneToNameTranslator translator, PipelineJob job, RecordedAction action) throws PipelineJobException;
}