Commit 4e7ae323 authored by hazrmard


Error handling when loading relearn data; an edge case remains where the (most recent) empty csv file may be loaded when re-learning.
parent ddd1b769
__pycache__
.ipynb_checkpoints
*.pyc
.idea
.vscode
results*/
*.ini
results*/
*store.csv
store
control
......@@ -15,8 +17,6 @@ rl_results_local/
rl_perf_plot.py
modeltrainnb/
energyfreqanalysis/
ah_api.py
TestRelearning.ipynb
*.pyc
.idea
*.out
\ No newline at end of file
*.out
relearn.pkl
-1659.8888191133738
......@@ -56,6 +56,7 @@ The repository contains the following files:
* `solar_irradiance.py`: Pulls solar irradiation data using Solcast API.
* `*.slurm`: Files used for offline weekly training
* `weights.best.hdf5`: LSTM model for energy prediction. Scaled data in -> scaled data out
* `.lastreward`: The reward accumulated by the controller using the parameters in the `agent_weights_*.h5f` files.
* `RL_relearn_data.pkl`: Just over one year of historical data going back from 22nd October 2019. The format of the data is shown below:
| `Dates` | `OAT` (F) | `OAH` (%) | `Ghi` (W/m2) | `SAT` (F) | `TotalE` (kJ) |
| --- | --- | --- | --- | --- | --- |
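A minimal sketch of loading this file, assuming it stores a pandas `DataFrame` written with `DataFrame.to_pickle`:

```python
import pandas as pd

# Load the historical relearning data; assumed to be a DataFrame indexed by
# timestamp with the columns listed in the table above.
df = pd.read_pickle('RL_relearn_data.pkl')
print(df.columns.tolist())
print(df.head())
```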
......@@ -188,6 +189,17 @@ Will start a demo on the development server where the control loop acts every se
To exit, press `Ctrl+C`.
#### Additional files created
The script creates/overwrites several files while running:
* `agent_weights_actor.h5f` and `agent_weights_critic.h5f`: The parameters used by the reinforcement learning agent to control the system.
* `log.txt`: The log file containing messages printed by the controller.
* `solcast_dhi.csv`: A csv file that caches weather forecasts in bulk so that frequent network calls are not made.
* `store_at`, `save_to`, `is_valid`: Plain text files containing the history of measurements, the control setpoint, and the setpoint validity status. The names of these files are specified as arguments to the script.
* `.lastreward`: The reward accumulated by the agent using the parameters in the `agent_weights_*.h5f` files.
* `relearn.pkl`: Intermediate data for the agent's environment when it is re-learning control.
[1]: https://www.wikiwand.com/en/Reinforcement_learning
[2]: https://github.com/keras-rl/keras-rl
......
......@@ -3,7 +3,9 @@ Defines functions that construct various components of a reinforcement learning
agent
"""
from typing import List, Any
from logging import Logger
import numpy as np
from keras import backend as K
from keras.models import Sequential, Model
from keras.callbacks import Callback
......@@ -82,14 +84,26 @@ class SaveBest(Callback):
Store neural network weights during training if the current episode's
performance is better than the previous best performance.
Class Attributes:
* `lastreward` (float): Last best reward
Args:
* `dest`: name of `h5f` file where to store weights.
* `log`: optional `Logger` used to warn when no previously saved best reward is found.
"""
def __init__(self, dest: str):
def __init__(self, dest: str, log: Logger=None):
super().__init__()
self.log = log
try:
with open('.lastreward', 'r') as f:
self.lastreward = float(f.readline().strip())
except IOError as e:
if self.log is not None:
self.log.warning(('Could not find last best reward in file '
'".lastreward". Assuming current training '
'yields best reward.'))
self.lastreward = -np.inf # reward of pre-packaged controller
self.dest = dest
self.lastreward = -1659.8888191133738 # -10000 updating with current best reward
self.rewardsTrace = []
......@@ -97,6 +111,8 @@ class SaveBest(Callback):
self.rewardsTrace.append(logs.get('episode_reward'))
if logs.get('episode_reward') > self.lastreward:
self.lastreward = logs.get('episode_reward')
with open('.lastreward', 'w') as f:
f.write(str(self.lastreward))
self.model.save_weights(self.dest, overwrite=True)
......@@ -119,6 +135,7 @@ def train_agent(agent, env, steps=30000, dest='agent_weights.h5f'):
store_weights = SaveBest(dest=dest)
agent.fit(env, nb_steps=steps, visualize=False, verbose=1, callbacks=[store_weights])
# save latest weights
# TODO: Should this be in production?
agent.save_weights('./rl_results_local/latestweights.h5f')
return store_weights
......
import requests
"""
Functions for getting readings from the BDX API and combining them with the
measurements database.
"""
import json
from pandas import *
import os
import glob
import numpy as np
from datetime import datetime, timedelta
import pytz
from glob import glob
from urllib.parse import urlencode
import warnings
from logging import Logger
import numpy as np
import pytz
import requests
from bs4 import BeautifulSoup
from helperfunctions import merge_df_rows, droprows, merge_df_columns
from pandas import *
from helperfunctions import droprows, merge_df_columns, merge_df_rows
def weeklybdxdata(weeks = 4):
"""
......@@ -83,12 +92,27 @@ def weeklybdxdata(weeks = 4):
return localdata
def weeklysolardata(datafolder: str = './energyfreqanalysis/', weeks: int = 4):
list_of_files = glob.glob(datafolder + '*store.*')
flist = []
for _ in range(weeks):
latest_file = max(list_of_files, key=os.path.getctime) # get the latest "weeks" weeks modified output files
list_of_files.remove(latest_file)
flist.append(latest_file)
"""
Extract solar irradiance data from database created by the control loop. This
is then combined with the remaining variables accessed from the BDX API to
complete the input feature set.
Keyword Arguments:
datafolder {str} -- Directory where .csv files are stored. (default: {'./energyfreqanalysis/'})
weeks {int} -- Number of weeks to read (default: {4})
Returns:
pandas.DataFrame -- Solar irradiance data in a dataframe
"""
data_files = glob(os.path.join(datafolder, '*store.csv'))
flist = sorted(data_files, key=os.path.getctime, reverse=True)[:weeks]
if len(flist) == 0:
warnings.warn('No *store.csv files found containing historical measurements.')
return None
elif len(flist) < weeks:
warnings.warn(('Only {} < {} *store.csv files found containing historical '
'measurements.').format(len(flist), weeks))
dflist = []
for filename in flist:
......@@ -109,9 +133,13 @@ def weeklysolardata(datafolder: str = './energyfreqanalysis/', weeks: int = 4):
return df_solar
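# Illustrative sketch only, not part of this commit: one possible guard against
# the edge case noted in the commit message, where the most recently modified
# *store.csv file may be empty. The helper name and the size check are
# assumptions (a file containing only a header row would still slip through).
def _nonempty_store_files(datafolder='./energyfreqanalysis/', weeks=4):
    candidates = glob(os.path.join(datafolder, '*store.csv'))
    candidates = [f for f in candidates if os.path.getsize(f) > 0]
    return sorted(candidates, key=os.path.getctime, reverse=True)[:weeks]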
def weeklyrelearndata(solardatapath: str, weeks: int = 4):
def weeklyrelearndata(solardatapath: str, weeks: int = 4, log: Logger=None):
df1 = weeklybdxdata(weeks = weeks)
df2 = weeklysolardata(datafolder=solardatapath)
if df2 is None:
raise FileNotFoundError('Solar data could not be extracted.')
dfrelearn = merge_df_columns([df1, df2])
dfrelearn = dfrelearn.reindex(columns=['OAT', 'OAH', 'Ghi', 'SAT', 'TotalE'])
......
......@@ -322,7 +322,7 @@ def learn_control(source: str, save_to: str, duration: int, signal_stop: Event,
try:
# get the new traindata
relearndf = weeklyrelearndata(solardatapath=source, weeks=4)
relearndf = weeklyrelearndata(solardatapath=source, weeks=4, log=log)
relearndf.to_pickle('relearn.pkl')
# Train it with the newly initialized environment
......
"""
Preprocessing/transformation/training functions for the energy model used in
the reinforcement learning agent's environment.
"""
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
import numpy as np
......@@ -5,6 +12,8 @@ from sklearn.preprocessing import MinMaxScaler
import scipy.signal as signal
from pandas import DataFrame
def retrain(model, data, epochs=25):
# do the smoothing
......@@ -38,6 +47,8 @@ def retrain(model, data, epochs=25):
model.fit(train_x, train_y, epochs=epochs, batch_size=16,
validation_data=(test_x, test_y), verbose=0, callbacks=[checkpoint])
def inputreshaper(x, time_steps=1, output_sequence=6):
total_array = []
m, n = x.shape
......@@ -59,6 +70,8 @@ def inputreshaper(x, time_steps=1, output_sequence=6):
return x_reshaped
def outputreshaper(y, output_sequence=6, output_features=1, time_steps=1):
N = output_sequence
total_array = []
......@@ -81,6 +94,8 @@ def outputreshaper(y, output_sequence=6, output_features=1, time_steps=1):
return y_reshaped
def butterworthsmoothing(df, column_names: list = None, Wn = 0.015):
"""
Smoothes the dataframe columns
......
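# Illustrative sketch only (the body of butterworthsmoothing is collapsed above):
# a zero-phase Butterworth low-pass filter applied to a single column, using the
# Wn cutoff from the signature. The filter order and helper name are assumptions.
def _butterworth_smooth(values, Wn=0.015, order=2):
    b, a = signal.butter(order, Wn, btype='lowpass')
    return signal.filtfilt(b, a, values)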
......@@ -18,9 +18,9 @@ def update_dhi_data(is_test_env):
# Solcast api key to access the API
if is_test_env:
api_key = 'Mj1OX_LqWspibNMsPSy2T34Ti6WlpCyA' # for test environment
api_key = 'Mj1OX_LqWspibNMsPSy2T34Ti6WlpCyA' # for test environment
else:
api_key = 'oQJtjbpSMt3BgoTvItnVqmqbhXcusUO1' # for production environment
api_key = 'oQJtjbpSMt3BgoTvItnVqmqbhXcusUO1' # for production environment
# Declare url string
url = 'https://api.solcast.com.au/radiation/'+type[1]+'?'+\
......@@ -39,6 +39,9 @@ def get_current_DHI(t, is_test_env):
using solcast API. Returns the DHI value for the requested time stamp.
Returns `None` when no data found.
"""
# TODO: Remove solcast_dhi.csv from the repo. Currently that would cause an error,
# since this function does not check whether the file exists before reading it.
# Correct behavior: check if the csv file exists and, if not, create an empty one.
# retrieve dhi data locally
df = pd.read_csv('solcast_dhi.csv')
......
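# Illustrative sketch only, not part of this commit: the behavior described by
# the TODO in get_current_DHI, i.e. create an empty cache file before reading it.
# The column names used for the empty cache are assumptions.
import os
import pandas as pd

def ensure_dhi_cache(path='solcast_dhi.csv'):
    # Create an empty cache with placeholder columns if none exists yet, so
    # that pd.read_csv(path) does not fail with FileNotFoundError.
    if not os.path.isfile(path):
        pd.DataFrame(columns=['PeriodEnd', 'Dhi']).to_csv(path, index=False)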