|
from finrock.data_feeder import PdDataFeeder
from finrock.trading_env import TradingEnv
from finrock.render import PygameRender


# Evaluate a trained PPO actor on the synthetic sinusoid dataset,
# rendering each step with pygame.
# NOTE(review): this chunk starts at original line 8 — `pd`, `np` and `tf`
# are presumably imported in the lines above this view; confirm.

# Use only the last 1000 rows for evaluation.
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]

# Training-run directory: data-feeder/env configuration and the saved
# actor weights are all loaded from here, so evaluation exactly matches
# the setup used at training time (no hand-duplicated indicator lists).
model_path = "runs/1704746665"

pd_data_feeder = PdDataFeeder.load_config(df, model_path)
env = TradingEnv.load_config(pd_data_feeder, model_path)

action_space = env.action_space
input_shape = env.observation_space.shape
pygameRender = PygameRender(frame_rate=120)

# Restore the trained actor network saved under the same run directory.
agent = tf.keras.models.load_model(f'{model_path}/ppo_sinusoid_actor.h5')

state, info = env.reset()
pygameRender.render(info)
rewards = 0.0
while True:
    # Greedy policy: pick the action with the highest predicted probability.
    prob = agent.predict(np.expand_dims(state, axis=0), verbose=False)[0]
    action = np.argmax(prob)
    # NOTE(review): the rest of the loop (env.step, reward accumulation,
    # rendering, episode reset) lies beyond this chunk — not reproduced here.
61 | 34 |
|
|
0 commit comments