-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathprocessing_utils.py
More file actions
84 lines (68 loc) · 3.23 KB
/
processing_utils.py
File metadata and controls
84 lines (68 loc) · 3.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import pandas as pd
import numpy as np
from scipy.ndimage import gaussian_filter1d
from scipy import signal
def compute_spike_density(row: pd.Series, t_min: float = None, t_max: float = None,
                          bin_size: float = 0.005, tau: float = 0.2,
                          conv_mode: str = 'same', window_len: int = 100) -> np.ndarray:
    """
    Compute a smoothed spike-density estimate (spikes/s) for a single unit.

    The spike train is binned into a firing-rate histogram and convolved with
    a causal (one-sided) exponential kernel whose decay constant is ``tau``
    seconds; the kernel is normalized to unit mass so the output stays in
    spikes/s.

    Parameters:
    row: pd.Series - dataframe row with 'spike_times' (seconds) and 'cluster_id'
    t_min: float - start of the histogram; defaults to the first spike time
    t_max: float - end of the histogram; defaults to the last spike time
    bin_size: float - width of the time bins in seconds
    tau: float - time constant of the exponential smoothing kernel in seconds
    conv_mode: str - mode passed to scipy.signal.convolve ('same'/'full'/'valid')
    window_len: int - kernel length in bins (previously hard-coded to 100)

    Returns:
    np.ndarray - smoothed firing rate; empty array when the unit has no
    spikes and no explicit time range was supplied.
    """
    spike_times = np.asarray(row['spike_times'])
    try:
        if t_min is None:
            t_min = spike_times.min()  # raises ValueError on an empty spike train
        if t_max is None:
            t_max = spike_times.max()
        bins = np.arange(t_min, t_max + bin_size, bin_size)
    except ValueError as e:
        print(f"Error in {row['cluster_id']}: {e}")
        return np.array([])
    counts, _ = np.histogram(spike_times, bins=bins)
    rate = counts / bin_size  # spikes per second
    # Causal exponential kernel: center=0 puts all mass after the spike;
    # tau is converted from seconds to bins.
    # TODO: CHECK M AND TAU IN RESPECT OF TIME/BIN for bin sizes > 1
    window = signal.windows.exponential(window_len, 0, tau / bin_size, sym=False)
    # Divide by the kernel mass so smoothing does not rescale the rate.
    smoothed = signal.convolve(rate, window, mode=conv_mode) / window.sum()
    return smoothed
def get_spike_densities_to_onset(df,
                                 query,
                                 event_code,
                                 event_times_all,
                                 dt = 0.005,
                                 time_range_before_onset = .25,
                                 time_range_after_onset = .5,
                                 ):
    """
    Collect spike-density snippets aligned to a given event (e.g. stimulus onset).

    Parameters:
    ---
    df: pd.DataFrame - must provide columns 'cluster_id', 'dataset',
        'spike_times' (first entry is taken as session start t0, seconds)
        and 'spike_density' (rate trace binned at dt)
    query: str - pandas query string selecting the units of interest
    event_code: int - event code to align to
    event_times_all: dict - dataset name -> (event_times, event_codes) pair
    dt: float - spike-density bin size in seconds
    time_range_before_onset: float - seconds of density kept before each event
    time_range_after_onset: float - seconds of density kept after each event

    Returns:
    np.ndarray of shape (n_bins, n_snippets) with one column per aligned
    event window; an empty list if no unit/event passed the range checks.
    """
    # Window extents in bins; loop-invariant, so compute once.
    n_before = int(time_range_before_onset / dt)
    n_after = int(time_range_after_onset / dt)
    idxs = df.query(query).index.values
    spike_densities_to_onset = []
    for idx in idxs:
        dataset = df.loc[idx, 'dataset']
        density = df.loc[idx, 'spike_density']
        t0 = df.loc[idx, 'spike_times'][0]  # session start, [s]
        times, codes = event_times_all[dataset][0], event_times_all[dataset][1]
        event_times = times[codes == event_code]
        # Convert event times to bin indices relative to t0.
        # NOTE: np.astype(x, int) only exists in NumPy >= 2.0; the
        # ndarray method is portable across versions.
        stim_onsets = ((event_times - t0) / dt).astype(int)  # [bins]
        # Keep only events whose full window fits inside the density trace;
        # without the upper bound, a late event yields a short slice and
        # np.array() below fails on the ragged list.
        stim_onsets = stim_onsets[(stim_onsets > n_before)
                                  & (stim_onsets + n_after <= len(density))]
        if len(stim_onsets) == 0:
            continue
        snippets = np.array([density[onset - n_before : onset + n_after]
                             for onset in stim_onsets]).T
        if len(spike_densities_to_onset) == 0:
            spike_densities_to_onset = snippets
        else:
            spike_densities_to_onset = np.concatenate(
                (spike_densities_to_onset, snippets), axis=1)
    return spike_densities_to_onset