Skip to content

Commit 7d7ee16

Browse files
committed
feat: Import code model base from another project of mine
1 parent c808440 commit 7d7ee16

10 files changed

Lines changed: 308 additions & 0 deletions

File tree

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
use std::process::Command;
2+
use std::path::Path;
3+
use std::fs;
4+
5+
fn main() {
6+
// Get the project root directory
7+
let project_root = Path::new(env!("CARGO_MANIFEST_DIR"));
8+
// Path to the Python script
9+
let script_path = project_root.join("src/main.py");
10+
// Path to the build file in ./build
11+
let build_file_path = project_root.join("build/neural_analytics.onnx");
12+
13+
// Check if the Python script exists
14+
if !script_path.exists() {
15+
panic!("[!] The file main.py is not found in the src folder");
16+
}
17+
18+
// Check if the build file already exists
19+
if !build_file_path.exists() {
20+
// Call the Python script only if the build file does not exist
21+
let output = Command::new("python3")
22+
.arg(&script_path)
23+
.output()
24+
.expect("[!] Error executing the Python script");
25+
26+
// Display output or errors from the script in the console
27+
if !output.status.success() {
28+
eprintln!(
29+
"[!] Error in the Python script: {}",
30+
String::from_utf8_lossy(&output.stderr)
31+
);
32+
} else {
33+
println!(
34+
"[*] Script executed successfully: {}",
35+
String::from_utf8_lossy(&output.stdout)
36+
);
37+
}
38+
} else {
39+
println!("[*] The build file already exists, skipping script execution.");
40+
}
41+
42+
// Copy the file neural_analytics.onnx
43+
let mut target_dir = Path::new("target")
44+
.join(std::env::var("PROFILE").unwrap_or_else(|_| "debug".to_string()))
45+
.join("assets");
46+
47+
target_dir = project_root.join("../../").join(target_dir);
48+
49+
// Create the target directory if it does not exist
50+
fs::create_dir_all(&target_dir).expect("[!] OS: Error creating the target directory");
51+
52+
// Copy the file
53+
let target_path = target_dir.join("neural_analytics.onnx");
54+
fs::copy(&build_file_path, &target_path).expect("[!] OS: Error copying the file");
55+
56+
println!("[*] File copied to: {:?}", target_path);
57+
}
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
scikit-learn==1.5.2
2+
numpy==2.1.2
3+
pandas==2.2.3
4+
torch==2.5.0
5+
onnx==1.17.0

packages/neural_analytics_model/src/datasets/neural_analytics.py

Whitespace-only changes.

packages/neural_analytics_model/src/lib.rs

Whitespace-only changes.
Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
from utils.trainer import train_model
from utils.export import export_model
from utils.evaluation import evaluate_model

from datasets.neural_analytics import NeuralAnalyticsDataset
from models.neural_analytics import NeuralAnalyticsModel
from sklearn.model_selection import train_test_split

from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

import os
import torch

BATCH_SIZE = 64
DATA_FILE = os.path.join(os.getcwd(), './assets/AUSTRIA_2015_2021.csv')


def _select_device():
    """Pick the best available torch device: CUDA, then MPS, then CPU."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    if torch.backends.mps.is_available():
        return torch.device('mps')
    return torch.device('cpu')


def main():
    """Train, evaluate and export the NeuralAnalytics model end to end."""
    # Announce what this entry point does.
    print(f'[*] Training module for the {NeuralAnalyticsModel.__name__} model')

    device = _select_device()
    torch.set_default_dtype(torch.float32)
    print(f'[*] The device to be used will be "{device}"')

    # Build the dataset and carve out a 20% validation split.
    full_dataset = NeuralAnalyticsDataset(DATA_FILE, device)
    train_split, val_split = train_test_split(full_dataset, test_size=0.2, random_state=42)

    # Wrap the splits in DataLoaders for batching.
    training_loader = DataLoader(train_split, batch_size=BATCH_SIZE, shuffle=True)
    validation_loader = DataLoader(val_split, batch_size=BATCH_SIZE, shuffle=False)  # order is irrelevant for validation

    # TensorBoard sink for training/evaluation metrics.
    writer = SummaryWriter(log_dir="./runs")

    # Train, then evaluate on the held-out split.
    trained_model = train_model(training_loader, device, writer)
    evaluate_model(trained_model, validation_loader, device, writer)

    # Export the trained model to ONNX.
    export_model(
        trained_model,
        device,
        input_size=(1, 19, 3),
        output_path='./build/neural_analytics.onnx'
    )

    # Flush and close the metrics report.
    writer.close()


if __name__ == "__main__":
    main()

packages/neural_analytics_model/src/models/neural_analytics.py

Whitespace-only changes.

packages/neural_analytics_model/src/preprocessors/neural_analytics.py

Whitespace-only changes.
Lines changed: 43 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,43 @@
1+
import torch
2+
from preprocessors.neural_analytics import calculate_r2_from_csv
3+
from sklearn.metrics import r2_score
4+
5+
def evaluate_model(model, val_loader, device, writer, epoch=50):
    """
    Evaluates the accuracy of the model on a dataset with sliding windows.

    :param model: PyTorch model to evaluate.
    :param val_loader: DataLoader containing the data for evaluation.
    :param device: Device where the model runs (CPU or GPU).
    :param writer: TensorBoard writer to log the accuracy.
    :param epoch: Current epoch number, used for logging in TensorBoard.
    """
    model.eval()  # Set the model to evaluation mode
    y_true = []
    y_pred = []

    with torch.no_grad():  # Do not compute gradients for evaluation
        for element in val_loader:
            # Unpack the batch and move it to the model's device.
            # (Fix: tensors were previously left on their original device,
            # which is inconsistent with train_model and would fail when the
            # model lives on GPU/MPS while the batch does not.)
            x = element['window_stack'].to(device)
            y = element['next_value'].to(device)

            # Make predictions
            outputs = model(x)

            # Assume the model returns a continuous value; drop extra dims.
            predicted = outputs.squeeze()

            # Store the predictions and true values (as CPU numpy arrays).
            y_true.extend(y.cpu().numpy())
            y_pred.extend(predicted.cpu().numpy())

    # Calculate R^2 on the whole evaluation set with sklearn.
    r2_model = r2_score(y_true, y_pred)
    print(f'[*] R^2 of the model on the evaluation set: {r2_model:.4f}')

    # Reference metric: R^2 of the production forecast from the raw CSV.
    r2_prod = calculate_r2_from_csv('./assets/AUSTRIA_2015_2021.csv')
    print(f'[*] R^2 of the production forecast: {r2_prod:.4f}')

    # Log R^2 in TensorBoard
    writer.add_scalar('R2/eval', r2_model, epoch)
Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
import torch
2+
import onnx
3+
import os
4+
5+
def export_model(model, device, input_size, output_path):
    """
    Exports a PyTorch model to ONNX format and validates the exported file.

    :param model: PyTorch model to be exported.
    :param device: PyTorch device being used for training.
    :param input_size: Input size of the model (e.g., (batch_size, channels, height, width)).
    :param output_path: Path where the ONNX model will be saved.
    """
    # Set the model to evaluation mode
    model.eval()

    # Create a dummy input tensor used to trace the model graph.
    dummy_input = torch.randn(*input_size).to(device)

    # Create the export directory if needed. output_path may have no
    # directory component, in which case dirname() is '' and there is
    # nothing to create (os.makedirs('') would raise).
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    # Export the model to ONNX format
    torch.onnx.export(
        model, dummy_input, output_path,
        export_params=True,
        opset_version=11,
        do_constant_folding=True,  # Constant optimization
        input_names=['input'],
        output_names=['output'],
        dynamic_axes={
            'input': {0: 'batch_size'},  # Dynamic axis for batch size
            'output': {0: 'batch_size'}
        }
    )

    # Validate the exported file: the previous load-and-save round trip did
    # not simplify anything; checking the model is the useful step here.
    model_onnx = onnx.load(output_path)
    onnx.checker.check_model(model_onnx)

    print(f'[*] Model exported to: {output_path}')
Lines changed: 102 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,102 @@
1+
from models.neural_analytics import NeuralAnalyticsModel
2+
3+
import time
4+
import torch
5+
import torch.nn as nn
6+
import torch.optim as optim
7+
8+
def r2_score_torch(y_true, y_pred):
    """
    Calculates the coefficient of determination (R^2) using PyTorch.

    :param y_true: Tensor of true values.
    :param y_pred: Tensor of predicted values.
    :return: R^2 as a Python float.
    """
    # Calculate the mean of the true values
    y_true_mean = torch.mean(y_true)

    # Residual sum of squares and total sum of squares.
    ss_res = torch.sum((y_true - y_pred) ** 2)
    ss_tot = torch.sum((y_true - y_true_mean) ** 2)

    # Guard against constant targets: ss_tot == 0 would divide by zero.
    # A perfect prediction of a constant target scores 1.0; anything else 0.0.
    if ss_tot == 0:
        return 1.0 if ss_res == 0 else 0.0

    # R^2 = 1 - SS_res / SS_tot, returned as a plain scalar.
    return (1 - ss_res / ss_tot).item()
26+
27+
def train_model(train_loader, device, writer, epochs=50, learning_rate=0.001):
    """
    Trains the next value prediction model in the electrical grid using sliding windows.

    :param train_loader: DataLoader for the training set.
    :param device: Device (CPU or GPU) to train the model.
    :param writer: TensorBoard writer to log the loss. Owned by the caller,
                   which is responsible for closing it.
    :param epochs: Number of epochs for training.
    :param learning_rate: Learning rate for the optimizer.
    :return: The trained model.
    """
    # Create a model and move it to the training device.
    model = NeuralAnalyticsModel()
    model.to(device)

    # Define the loss function and optimizer
    criterion = nn.MSELoss()  # Use MSELoss for regression
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    start_time = time.time()  # Measure training time

    # Training loop
    for epoch in range(epochs):
        epoch_loss = 0.0  # Accumulated loss for this epoch
        all_outputs = []
        all_targets = []

        for element in train_loader:
            # Unpack the batch and move it to the training device.
            x = element['window_stack'].to(device)
            y = element['next_value'].to(device)

            # Forward pass; squeeze so outputs and y share the same shape.
            outputs = torch.squeeze(model(x))

            # Calculate the loss
            loss = criterion(outputs, y)

            # Backward pass and optimization
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()

            # Save predicted and true values to calculate R² over the epoch.
            all_outputs.append(outputs.detach())
            all_targets.append(y.detach())

        # Concatenate per-batch tensors into epoch-level tensors.
        all_outputs = torch.cat(all_outputs)
        all_targets = torch.cat(all_targets)

        # Average loss per epoch
        avg_loss = epoch_loss / len(train_loader)

        # Calculate R² using the helper defined above.
        r2 = r2_score_torch(all_targets, all_outputs)

        # Export loss and R² to TensorBoard
        writer.add_scalar('Loss/train', avg_loss, epoch)
        writer.add_scalar('R2/train', r2, epoch)

        # Log each epoch with loss and R²
        print(f'[#] Epoch [{epoch + 1}/{epochs}] -> Loss: {avg_loss:.4f}; R²: {r2:.4f}')

    total_time = time.time() - start_time
    print(f'[*] Training completed in {total_time:.2f} seconds.')

    # NOTE: do not close the writer here. The caller (main) passes the same
    # writer to evaluate_model after training; closing it would make those
    # subsequent add_scalar calls write to a closed writer.
    return model

0 commit comments

Comments
 (0)