'''
Use the best MLP-HBV model (selected via hyperparameter tuning) to make predictions on all CAMELS basins.
Run for both the 1-HBV-unit and 16-HBV-unit models.
Author: Sandeep Poudel (1/12/2026)
'''
import pandas as pd
import numpy as np
import os
import time
import torch
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.preprocessing import StandardScaler
from models.multi_hbv import MLPParameterNet, DifferentiableMHBV, constrain_multi_parameters # custom imports
#-------------------------------#--------------------------------#-------------------------------#--------------------------------#-------------------------------#
# Configuration
static_feats_names = [
"elev_mean", "slope_mean", "area_gages2", "p_mean", "pet_mean", "aridity",
"p_seasonality", "frac_snow", "high_prec_freq", "high_prec_dur",
"low_prec_freq", "low_prec_dur", "frac_forest", "lai_max", "lai_diff",
"gvf_max", "gvf_diff", "dom_land_cover_frac", "soil_depth_pelletier",
"soil_depth_statsgo", "soil_porosity", "soil_conductivity", "max_water_content",
"sand_frac", "silt_frac", "clay_frac", "glim_1st_class_frac", "glim_2nd_class_frac",
"carbonate_rocks_frac", "geol_permeability",
]
# ‼️‼️Or only use latitude/longitude as static features for alternative input experiment
# static_feats_names = ["lat", "lon"]
num_hbv_units = 1 # number of HBV parameter sets predicted per basin (set to 1 or 16)
hidden_dim = 2048 # MLP hidden dimension
print(f"Using hidden dim: {hidden_dim} with hbv unit: {num_hbv_units}")
data_dir = "data" # ‼️‼️Or use the correct data directory for mixed input experiment
output_dir = f"output/best_mlp_{num_hbv_units}hbv"
os.makedirs(output_dir, exist_ok=True)
scaler_path = f"{data_dir}/scaler_camels_mlp_hbv.pt"
model_path = f"output/tune_mlp_hbv/best_mlp_model_{num_hbv_units}hbv_{hidden_dim}hiddensize.pt"
input_dim = len(static_feats_names) # number of static features
output_dim = 20 # Number of HBV parameters
batch_size = 128 # batch size
epochs = 1000 # maximum number of training epochs
lr = 1e-4 # Learning rate
dropout = 0.4 # Dropout rate for MLP
spinup_days = 365*2 # Spin-up days for HBV model
sequence_length = spinup_days + 365 # Length of the input sequence for HBV model
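# With spinup_days = 730, each sample spans 730 + 365 = 1095 days:
# the first two years warm up the HBV states, the final year is scored.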
stride_length = 60 # stride (in days) of the sliding window used to create sequences
num_ensemble = 1 # number of MC dropout samples during inference, set to 1 for no MC dropout
early_stopping_patience = 10 # Patience for early stopping
lr_patience = 5 # Patience for learning rate reduction
test_batch_size = 128 # number of basins to run in parallel during inference
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Get file list from data directory
basin_list = pd.read_csv("camels531.csv") # list of all CAMELS basins
# randomly select 20% of basins as the test set
test_basin = basin_list.sample(frac=0.2, random_state=42).reset_index(drop=True)
# save the test-basin list as a csv file
test_basin.to_csv(os.path.join(output_dir, "test_basins.csv"), index=False)
train_basin = basin_list[~basin_list['name'].isin(test_basin['name'])].reset_index(drop=True)
gauge_id = train_basin["name"].values
# add a leading zero if gauge_id is numeric and has length 7
gauge_id = [str(gid).zfill(8) if str(gid).isdigit() and len(str(gid))==7 else str(gid) for gid in gauge_id]
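# e.g. a numeric id 1013500 becomes "01013500", restoring the leading zero
# dropped when the gauge id was parsed as a number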
file_list = [os.path.join(data_dir, f"input_{gid}.csv") for gid in gauge_id]
#-------------------------------#--------------------------------#-------------------------------#--------------------------------#-------------------------------#
# Dataset and DataLoader
class HBVDataset(Dataset):
"""
Dataset for HBV model.
Loads multiple basin csv files and selects specified years.
Each item returns (static_features, precip, temp, daylen, qobs) of 2-year sequences.
Static features can be scaled with StandardScaler.
"""
def __init__(self, file_list, years, scaler=None, fit_scaler=False):
self.data = []
self.scaler = scaler
static_feats_all = []
for f in file_list:
df = pd.read_csv(f)
# Filter rows only in the desired years
df['date'] = pd.to_datetime(df['date'])
df = df[df['date'].dt.year.isin(years)].reset_index(drop=True)
static_feats = df[static_feats_names].iloc[0].values.astype("float32")
precip = df["precip"].values.astype("float32")
temp = ((df["tmax"] + df["tmin"]) / 2).values.astype("float32")
qobs = df["qobs"].values.astype("float32")
daylen = (df["daylenhr"]).values.astype("float32")
total_days = len(df)
for start in range(0, total_days - sequence_length + 1, stride_length): # step by stride_length
end = start + sequence_length
self.data.append({
"static": static_feats,
"precip": precip[start:end],
"temp": temp[start:end],
"qobs": qobs[start:end],
"daylen": daylen[start:end],
})
static_feats_all.append(static_feats)
if fit_scaler and scaler is None:
self.scaler = StandardScaler()
self.scaler.fit(np.stack(static_feats_all))
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
d = self.data[idx]
static = d["static"]
if self.scaler is not None:
static = self.scaler.transform(static.reshape(1, -1)).squeeze(0).astype(np.float32)
return (
torch.tensor(static, dtype=torch.float32),
torch.tensor(d["precip"], dtype=torch.float32),
torch.tensor(d["temp"], dtype=torch.float32),
torch.tensor(d["daylen"], dtype=torch.float32),
torch.tensor(d["qobs"], dtype=torch.float32),
)
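# Sketch: inspecting one sample (shapes assume sequence_length = 1095 days):
#   ds = HBVDataset(file_list[:1], years=list(range(1990, 2006)), fit_scaler=True)
#   static, precip, temp, daylen, qobs = ds[0]
#   static: [len(static_feats_names)]; precip/temp/daylen/qobs: [1095] each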
#-------------------------------#--------------------------------#-------------------------------#--------------------------------#-------------------------------#
# Training and Validation
start_time = time.time()
# MSE loss function that handles NaNs in target
def masked_mse_loss(pred, target):
mask = ~torch.isnan(target)
if mask.sum() == 0:
return torch.tensor(0.0, device=target.device, requires_grad=True)
return ((pred[mask] - target[mask])**2).mean()
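# Worked example: pred = [1., 2., 5.], target = [1., nan, 3.]
# -> mask keeps indices 0 and 2, so loss = ((1-1)**2 + (5-3)**2) / 2 = 2.0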
# Datasets and Loaders
# use 25 years of data (1990-2014): 16 years (1990-2005) for training, 9 years (2006-2014) for validation
train_ds = HBVDataset(file_list, years=list(range(1990, 2006)), fit_scaler=True) # ‼️ Change training years as needed
scaler = train_ds.scaler # Get the scaler from the training dataset
torch.save(scaler, scaler_path) # save the scaler to a file
valid_ds = HBVDataset(file_list, years=list(range(2006, 2015)), scaler=scaler) # ‼️ Change validation years as needed
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
valid_loader = DataLoader(valid_ds, batch_size=batch_size, shuffle=False)
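# Each batch yields static [B, len(static_feats_names)] and precip/temp/daylen/qobs each [B, sequence_length].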
# Models: Load MLP and HBV
mlp = MLPParameterNet(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim*num_hbv_units, dropout=dropout).to(device)
hbv = DifferentiableMHBV(num_hbv_units=num_hbv_units).to(device)
# Optimizer & Loss
optimizer = torch.optim.Adam(mlp.parameters(), lr=lr)
loss_fn = masked_mse_loss
# Learning rate scheduler
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=lr_patience, min_lr=1e-6)
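# ReduceLROnPlateau multiplies the lr by 0.1 after `lr_patience` epochs without
# validation-loss improvement, but never below min_lr = 1e-6.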
best_val_loss = float("inf")
patience = early_stopping_patience # Early stopping patience
epochs_no_improve = 0
# torch.autograd.set_detect_anomaly(True) # Enable anomaly detection during debugging NaNs
for epoch in range(1, epochs + 1):
mlp.train()
total_loss = 0.0
for static, precip, temp, daylen, qobs in train_loader:
static = static.to(device)
precip = precip.to(device)
temp = temp.to(device)
daylen = daylen.to(device)
qobs = qobs.to(device)
# --- MLP predicts HBV parameters ---
pars = mlp(static)
pars = constrain_multi_parameters(pars, num_hbv_units) # constrain parameters to physical ranges
# --- Spinup ---
hbv_states = hbv.run_spinup(pars, precip[:, :spinup_days],
temp[:, :spinup_days], daylen[:, :spinup_days])
# --- Main period with gradients ---
hbv.set_state(hbv_states)
qsim = hbv(pars, precip[:, spinup_days:], temp[:, spinup_days:], daylen[:, spinup_days:])
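        # qsim has the same shape as qobs[:, spinup_days:], i.e. [B, sequence_length - spinup_days]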
# --- Loss ---
loss = loss_fn(qsim, qobs[:, spinup_days:])
if any(torch.isnan(v).any() for v in [pars, qsim, loss]):
print("NaNs detected in parameters, states, or loss — skipping batch.")
continue
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(mlp.parameters(), max_norm=1.0)
optimizer.step()
# --- Reset states for next batch ---
hbv.reset_state()
total_loss += loss.detach().cpu().item()
avg_train_loss = total_loss / len(train_loader)
# Validation
mlp.eval()
val_loss = 0.0
with torch.no_grad():
for static, precip, temp, daylen, qobs in valid_loader:
static = static.to(device)
precip = precip.to(device)
temp = temp.to(device)
daylen = daylen.to(device)
qobs = qobs.to(device)
pars = mlp(static)
pars = constrain_multi_parameters(pars, num_hbv_units)
hbv_states = hbv.run_spinup(pars, precip[:, :spinup_days], temp[:, :spinup_days], daylen[:, :spinup_days])
hbv.set_state(hbv_states)
qsim = hbv(pars, precip[:, spinup_days:], temp[:, spinup_days:], daylen[:, spinup_days:])
loss = loss_fn(qsim, qobs[:, spinup_days:])
if torch.isnan(loss):
continue
hbv.reset_state()
val_loss += loss.detach().cpu().item()
avg_val_loss = val_loss / len(valid_loader)
print(f"Epoch {epoch:02d} | Train Loss: {avg_train_loss:.4f} | Val Loss: {avg_val_loss:.4f} | lr: {optimizer.param_groups[0]['lr']:.6f}")
# Step the scheduler
scheduler.step(avg_val_loss)
# Early stopping
if avg_val_loss < best_val_loss:
best_val_loss = avg_val_loss
torch.save(mlp.state_dict(), model_path)
epochs_no_improve = 0
else:
epochs_no_improve += 1
if epochs_no_improve >= patience:
print(f'Early stopping triggered at epoch {epoch}. No improvement for {patience} epochs.')
break
print(f"Training complete in {(time.time() - start_time)/60:.2f} minutes")
print(f"Best validation loss with hidden size of {hidden_dim} with hbv unit of {num_hbv_units} is {best_val_loss:.4f} at epoch {epoch - epochs_no_improve}")
#-------------------------------#--------------------------------#-------------------------------#--------------------------------#-------------------------------#
# Inference with MC Dropout
start_time = time.time()
mlp = MLPParameterNet(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim*num_hbv_units, dropout=dropout)
mlp.load_state_dict(torch.load(model_path, map_location=device))
mlp.to(device)
mlp.eval() # ‼️‼️run mlp in train mode if using MC dropout; else eval mode is fine
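# Sketch (assumes MLPParameterNet uses torch.nn.Dropout internally): MC dropout
# can be enabled at inference by switching only the Dropout modules back to
# train mode and setting num_ensemble > 1 above:
#   for m in mlp.modules():
#       if isinstance(m, torch.nn.Dropout):
#           m.train()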
scaler = torch.load(scaler_path, weights_only=False)
hbv = DifferentiableMHBV(num_hbv_units=num_hbv_units).to(device)
hbv.eval() # HBV is deterministic, eval mode is fine
basin_list = pd.read_csv("camels531.csv")
gauge_id = basin_list["name"].values
# add a leading zero if gauge_id is numeric and has length 7
gauge_id = [str(gid).zfill(8) if str(gid).isdigit() and len(str(gid))==7 else str(gid) for gid in gauge_id]
file_list = [os.path.join(data_dir, f"input_{gid}.csv") for gid in gauge_id]
for i in range(0, len(file_list), test_batch_size):
hbv.reset_state() # Reset HBV states before each batch
batch_files = file_list[i:i + test_batch_size]
dfs, static_list, precip_list, temp_list, daylen_list, qobs_list, basin_ids = [], [], [], [], [], [], []
for fpath in batch_files:
basin_id = os.path.basename(fpath).replace(".csv", "")
df = pd.read_csv(fpath)
dfs.append(df)
basin_ids.append(basin_id)
# Static input
static = df[static_feats_names].iloc[0].values.astype("float32")
static = scaler.transform(static.reshape(1, -1))
static_list.append(static)
# Dynamic inputs
precip_list.append(df["precip"].values.astype("float32"))
temp_list.append(((df["tmax"] + df["tmin"]) / 2).values.astype("float32"))
daylen_list.append(df["daylenhr"].values.astype("float32"))
qobs_list.append(df["qobs"].values)
# Convert to tensors (assuming all basins have same T)
static_tensor = torch.tensor(np.vstack(static_list), dtype=torch.float32).to(device) # [B, F]
precip = torch.tensor(np.stack(precip_list), dtype=torch.float32).to(device) # [B, T]
temp = torch.tensor(np.stack(temp_list), dtype=torch.float32).to(device) # [B, T]
daylen = torch.tensor(np.stack(daylen_list), dtype=torch.float32).to(device) # [B, T]
with torch.no_grad():
ensemble_qsim = [] # to store ensemble predictions if using MC dropout
for _ in range(num_ensemble):
pars = mlp(static_tensor) # [B, P]
pars = constrain_multi_parameters(pars, num_hbv_units)
qsim = hbv(pars, precip, temp, daylen) # [B, T]
ensemble_qsim.append(qsim.cpu().numpy())
ensemble_qsim = np.stack(ensemble_qsim, axis=2) # [B, T, num_ensemble]
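    # Sketch: with num_ensemble > 1, an ensemble mean and spread could be taken,
    # e.g. qsim_mean = ensemble_qsim.mean(axis=2), qsim_std = ensemble_qsim.std(axis=2)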
# Save per basin predictions as csv
for b in range(len(batch_files)):
df = dfs[b]
qobs = qobs_list[b]
out_df = pd.DataFrame({
"date": df["date"],
"qobs": np.round(qobs, 3)
})
for j in range(num_ensemble):
out_df[f"qsim_{j+1}"] = np.round(ensemble_qsim[b, :, j], 3)
out_df.to_csv(os.path.join(output_dir, f"pred_{basin_ids[b]}.csv"), index=False)
print(f"Inference complete in {(time.time() - start_time)/60:.2f} minutes")