1+ from models .neural_analytics import NeuralAnalyticsModel
2+
3+ import time
4+ import torch
5+ import torch .nn as nn
6+ import torch .optim as optim
7+
def r2_score_torch(y_true, y_pred):
    """
    Compute the coefficient of determination (R^2) with PyTorch ops.

    :param y_true: Tensor of ground-truth values.
    :param y_pred: Tensor of predictions, same shape as ``y_true``.
    :return: R^2 as a plain Python float.
    """
    # Total sum of squares: spread of the targets around their mean.
    mean_true = y_true.mean()
    ss_tot = ((y_true - mean_true) ** 2).sum()

    # Residual sum of squares: squared error of the predictions.
    ss_res = ((y_true - y_pred) ** 2).sum()

    # R^2 = 1 - SS_res / SS_tot; .item() unwraps the 0-d tensor to a scalar.
    return (1 - ss_res / ss_tot).item()
26+
def train_model(train_loader, device, writer, epochs=50, learning_rate=0.001):
    """
    Trains the next-value prediction model for the electrical grid on
    sliding-window samples.

    :param train_loader: DataLoader yielding dicts with a 'window_stack'
        input tensor and a 'next_value' regression target per batch.
    :param device: Device (CPU or GPU) to train the model on.
    :param writer: TensorBoard SummaryWriter for logging loss and R^2;
        it is closed when training finishes.
    :param epochs: Number of epochs for training.
    :param learning_rate: Learning rate for the Adam optimizer.
    :return: The trained model.
    """
    # Create the model and move it to the target device
    model = NeuralAnalyticsModel()
    model.to(device)
    model.train()  # ensure dropout/batch-norm layers are in training mode

    # Loss function and optimizer
    criterion = nn.MSELoss()  # MSELoss for regression
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    start_time = time.time()  # Measure training time

    # Training loop
    for epoch in range(epochs):
        epoch_loss = 0.0  # Accumulated loss over this epoch
        all_outputs = []
        all_targets = []

        for element in train_loader:
            # Unpack the batch
            x = element['window_stack'].to(device)
            y = element['next_value'].to(device)

            # Forward pass
            outputs = model(x)

            # Drop only the trailing singleton dimension so outputs match y.
            # NOTE: a bare torch.squeeze() would also remove the batch
            # dimension when the last batch holds a single sample, yielding
            # a 0-d tensor and a shape mismatch against the targets.
            outputs = outputs.squeeze(-1)

            # Compute the loss
            loss = criterion(outputs, y)

            # Backward pass and optimization
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

            # Keep detached copies for the epoch-level R^2 computation
            all_outputs.append(outputs.detach())
            all_targets.append(y.detach())

        # Concatenate per-batch tensors into epoch-level tensors
        all_outputs = torch.cat(all_outputs)
        all_targets = torch.cat(all_targets)

        # Average loss per epoch
        avg_loss = epoch_loss / len(train_loader)

        # Epoch-level R^2 over all training predictions
        r2 = r2_score_torch(all_targets, all_outputs)

        # Export loss and R^2 to TensorBoard
        writer.add_scalar('Loss/train', avg_loss, epoch)
        writer.add_scalar('R2/train', r2, epoch)

        # Log each epoch with loss and R^2
        print(f'[#] Epoch [{epoch + 1}/{epochs}] -> Loss: {avg_loss:.4f}; R²: {r2:.4f}')

    total_time = time.time() - start_time
    print(f'[*] Training completed in {total_time:.2f} seconds.')

    # Close the SummaryWriter
    writer.close()

    return model