-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathAnalyzeThresholdRamp.m
More file actions
83 lines (75 loc) · 3.66 KB
/
AnalyzeThresholdRamp.m
File metadata and controls
83 lines (75 loc) · 3.66 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
function analysis = AnalyzeThresholdRamp( filename, varargin )
%ANALYZETHRESHOLDRAMP Analyze a threshold-ramp protocol stored in an ABF file.
%   analysis = AnalyzeThresholdRamp( filename, ... ) loads every episode of
%   the given ABF file, detects spikes in each sweep, computes spike-shape
%   parameters, and processes the ramp protocol into stimulated vs.
%   non-stimulated spikes, FI data, and ISIs.
%
%   Name/value options (all optional):
%     'postSpikePeriod'     - ms of data kept after each spike (default 20)
%     'preSpikePeriod'      - ms of data kept before each spike (default 1)
%     'ampChannel'          - amplifier channel to analyze (default 0)
%     'produceSummaryTable' - emit a one-row summary table (default true)
%   Unmatched name/value pairs are retained and forwarded to lower-level
%   analysis functions.
%
%   Returns a struct with fields including: filename, cellId, datetime,
%   rawTraces, header, stimWaveform, samplesPerMs, spikes, dep, summary,
%   and the final options struct.

% use an inputParser object to parse options passed in from above
optionParser = inputParser();
optionParser.StructExpand = true;
optionParser.KeepUnmatched = true;
% length of time after spike to keep for eventStack (in ms)
optionParser.addParameter( 'postSpikePeriod', 20 );
% length of time before spike to keep for eventStack (in ms)
optionParser.addParameter( 'preSpikePeriod', 1 );
% which amplifier channel to draw from
optionParser.addParameter( 'ampChannel', 0 );
% flag to produce a summary table row for a larger analysis
optionParser.addParameter( 'produceSummaryTable', true );
% pass to a function to do a little extra parsing
options = parseOptions( optionParser, varargin{:} );
% parse cellId from filename: everything before the first underscore,
% with any spaces stripped out
[~, justTheFileName, ~] = fileparts( filename );
cellId = strsplit( justTheFileName, '_' );
cellId = cellId{1};
cellId = strrep( cellId, ' ', '' );
% initialize analysis struct object
analysis = struct( 'filename', filename, 'cellId', cellId, ...
    'datetime', datetime( 'now' ) );
% pull data from abf file using abfload
% https://github.com/fcollman/abfload
% data is a [samples X ampChannels X episodes] matrix
% sampleInterval is in microseconds
% header is a structure of descriptive information
[abfData, sampleInterval, header] = abfload( filename );
% NOTE: BuildStimFromHeader expects sampleInterval in microseconds, so it
% must be called before the millisecond conversion below
abfStim = BuildStimFromHeader( header, sampleInterval, options.ampChannel );
abfData = squeeze( abfData(:, options.ampChannel+1, :) ); % strip away other dimensions
analysis.rawTraces = abfData;
sampleInterval = sampleInterval / 1000; % convert to milliseconds
samplesPerMs = 1 / sampleInterval;
numberOfEpisodes = header.lActualEpisodes;
analysis.header = header;
analysis.stimWaveform = abfStim;
analysis.samplesPerMs = samplesPerMs;
% iterate through episodes of data collecting spike information.
% Guard the zero-episode case: without it, spikeStruct would be
% undefined and the assignment below would error out.
if numberOfEpisodes > 0
    for episode = 1:numberOfEpisodes
        % trace holds a single sweep of ephys data from the protocol
        trace = abfData(:, episode);
        % eventDetect is the spike detector. Tends to be fairly liberal.
        [spikeStartIndices, options] = DetectSpikes( trace, samplesPerMs, options );
        [spikeStruct(episode), options] = ComputeSpikeShapeParameters( trace, ...
            samplesPerMs, spikeStartIndices, options ); %#ok<AGROW> size unknown until ComputeSpikeShapeParameters defines the fields
    end
    analysis.spikes = spikeStruct;
else
    analysis.spikes = struct([]);
end
% process the protocol information to separate spikes into stimulated and
% non-stimulated, build FI data, ISIs
[analysis.dep, options] = ProcessThresholdRamps( analysis, abfStim, samplesPerMs, options );
% Create and store summary row as table in analysis object. Desirable if
% you're analyzing a large number of hypdeps simultaneously and you want
% to produce a summary table by concatenating a large number of analysis
% objects
analysis.summary = ProduceRampSummary( analysis );
analysis.options = options;
end
%%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%% %%%
% sequester the code for parsing input into this function down here
function options = parseOptions( parser, varargin )
%PARSEOPTIONS Run the parser over varargin and flatten results into a struct.
%   Matched parameters land in the struct directly; any unmatched
%   name/value pairs (KeepUnmatched must be on) are merged in as ordinary
%   fields. Keeping the unmatched pairs may be unnecessary, but it is the
%   conservative choice: lower levels of the call hierarchy may need
%   options passed down from multiple layers above.
parser.parse( varargin{:} );
% start from the declared (matched) parameters
options = parser.Results;
% fold the unmatched pairs in on top
extras = parser.Unmatched;
extraNames = fieldnames( extras );
for k = 1:numel( extraNames )
    fieldName = extraNames{k};
    options.(fieldName) = extras.(fieldName);
end
end