Commit 474554ac authored by hazrmard's avatar hazrmard
Browse files

committing changes for repo relocation

parent 133fc0aa
File added
......@@ -3,4 +3,7 @@
*.slxc
*.asv
*.mat
slprj
\ No newline at end of file
slprj
__pycache__
.ipynb_checkpoints
.vscode
\ No newline at end of file
gitdir: C:/Users/ahmedi/Developer/bare_repos/.datadrivenmodels
{
"folders": [
{
"path": "."
},
{
"path": "..\\pyTorchBridge"
},
{
"path": "..\\pyStateSpace"
}
],
"settings": {}
}
\ No newline at end of file
% Training an LSTM network to predict the next number
% from an input sequence.
%
% Input: 1,2,3
% Output: 4

N = 1000;     % number of training integers
NTest = 200;  % number of test integers
seqLen = 10;  % length of each input sub-sequence

% Arrange consecutive integers into rows of `seqLen` each.
seq = reshape(1:N, seqLen, [])';
testSeq = reshape(N+1:N+NTest, seqLen, [])';

% The input variables are normalized to have 0 mean and
% 1 variance, using training-set statistics for both splits.
mu = mean(seq(:));
sig = std(seq(:));
stdSeq = (seq - mu) / sig;
stdTest = (testSeq - mu) / sig;

% Preparing input data as a cell array of row vectors. The
% last row of each split is dropped since it has no
% following row to provide a label.
XTrain = mat2cell(stdSeq(1:end-1,:),...
    ones(1, N/seqLen - 1));
XTest = mat2cell(stdTest(1:end-1,:),...
    ones(1, NTest/seqLen - 1));

% For format of the response, see
% https://www.mathworks.com/help/deeplearning/ref/trainnetwork.html
% The labels are the next number after each sequence, i.e.
% the first element of the following row.
YTrain = seq(2:end,1);
YTest = testSeq(2:end,1);

% Defining an LSTM network. `lstmLayer`'s first argument is
% the number of hidden units (the dimension of the hidden
% state), not a window length. 'OutputMode','last' makes the
% layer emit a single vector per sequence.
layers = [...
    sequenceInputLayer(1)
    lstmLayer(10,'OutputMode','last')
    fullyConnectedLayer(1)
    regressionLayer];

options = trainingOptions('adam', ...
    'MaxEpochs',150, ...
    'MiniBatchSize', 5, ...
    'GradientThreshold',1, ...
    'InitialLearnRate',1.0, ...
    'LearnRateSchedule','piecewise', ...
    'LearnRateDropPeriod', 20, ...
    'LearnRateDropFactor',0.5, ...
    'Verbose',0, ...
    'Plots','training-progress');

net = trainNetwork(XTrain, YTrain, layers, options);

% FIX: evaluate on the held-out test set. The original code
% predicted on XTrain, leaving XTest/YTest entirely unused.
YPred = int32(predict(net, XTest));
\ No newline at end of file
classdef CompositeModel
%COMPOSITEMODEL Container pairing a black-box and a grey-box model.
%   Holds a data-driven (black-box) model, a physics-based (grey-box)
%   model, and a `modelType` tag. How the two models are combined is
%   not defined in this file.
properties
blackBox   % black-box (data-driven) model object
greyBox    % grey-box (physics-based) model object
modelType  % tag describing how the two models are to be combined
end
methods
function obj = CompositeModel(blackBox, greyBox, modelType)
%COMPOSITEMODEL Construct an instance holding both models.
%   Simply stores the given models and type tag on the object.
obj.blackBox = blackBox;
obj.greyBox = greyBox;
obj.modelType = modelType;
end
function outputArg = method1(obj,inputArg)
%METHOD1 Auto-generated template method.
%   NOTE(review): references obj.Property1, which is not declared
%   on this class, so calling this method will error. This looks
%   like leftover MATLAB class-template boilerplate -- confirm the
%   intended computation or remove the method.
outputArg = obj.Property1 + inputArg;
end
end
end
% This script generates .mat files for various inputs on
% the RC model (a Simulink model named 'RC', run via
% runSim). Each .mat file stores a variable `data` with
% three columns:
%
%   time    V_in    I_capacitor
%
% Other variables like peak, freq etc. are stored for
% relevant inputs.
%
% NOTE(review): the noise value ("_25") is appended to all
% three filenames, but awgn() is commented out for the ramp
% and sine cases, so only the combined signal actually has
% noise added -- confirm this is intended.
duration = 10;      % simulation length, seconds
resolution = 0.001; % sample spacing, seconds
N = duration / resolution; % number of samples
t = 0:resolution:(duration-resolution); % time stamps
noise = 25; % signal-to-noise ratio in dB
peak = 10; % amplitude of signal
freq = 1; % frequency of sine signal
% ramp input: rises linearly from 0 to `peak` over `duration`
in = t' * peak / duration;
% sim_in = [t', awgn(in, noise, 'measured')];
sim_in = [t', in];
data = runSim('RC', sim_in);
% Variables are saved by name, so `data`, `peak`,
% `resolution` (and `freq` below) must keep these names.
save("RC_ramp_" + num2str(noise) + ".mat",...
'data', 'peak', 'resolution');
% sine input
in = sin(2*pi*freq*t') * peak;
% sim_in = [t', awgn(in, noise, 'measured')];
sim_in = [t', in];
data = runSim('RC', sim_in);
save("RC_sin_" + num2str(noise) + ".mat",...
'data', 'freq', 'peak', 'resolution');
% combined ramp + sine, with white Gaussian noise at `noise` dB SNR
in = (t' * peak / duration) + sin(2*pi*freq*t') * peak;
sim_in = [t', awgn(in, noise, 'measured')];
data = runSim('RC', sim_in);
save("RC_comb_" + num2str(noise) +".mat",...
'data', 'freq', 'peak', 'resolution');
% Tidy up the workspace; only the .mat files persist.
clear data freq peak N t duration resolution in sim_in
\ No newline at end of file
function results = runSim(model, sim_in)
% RUNSIM Run a Simulink model against a time-stamped input matrix.
%
% `model` is the name of a Simulink model. `sim_in` is a matrix
% whose first column is time stamps and whose remaining columns
% are individual inputs. The model writes its output to a
% workspace variable `sim_out` (an object with .Time and .Data).
%
% NOTE(review): `sim_in` here is a function argument, and
% `sim_out` is read below as if it were in this function's
% workspace. sim()'s default source workspace is 'base', so this
% appears to rely on the calling script defining `sim_in` in the
% base workspace under the same name -- confirm the intended
% workspace wiring before reusing this elsewhere.
%
% Returns `results` in the same [time, outputs] form as
% `sim_in`, resampled onto a uniform grid.
stop_t = sim_in(end,1);     % simulate until the last input time stamp
steps = diff(sim_in(:, 1)); % spacing between consecutive input samples
minStep = min(steps(:));    % smallest spacing bounds the solver step
% Cap the solver's maximum step so no input sample is skipped.
set_param(model, 'StopTime', num2str(stop_t), 'MaxStep', num2str(minStep));
% set_param(model, 'StopTime', num2str(stop_t));
sim(model);
% results = [sim_out.Time sim_out.Data];
% Resample the (possibly variable-step) output onto a uniform
% grid at rate 1/minStep so it lines up with the input form.
[y, t] = resample(sim_out.Data, sim_out.Time, 1 / minStep);
results = [t, y];
end
% Estimates a LSTM model of a RC circuit.
%
% See:
% - https://www.mathworks.com/help/deeplearning/ug/long-short-term-memory-networks.html
% - https://www.mathworks.com/help/deeplearning/examples/sequence-to-sequence-regression-using-deep-learning.html
% - https://www.mathworks.com/help/deeplearning/examples/time-series-forecasting-using-deep-learning.html

close all;

% Load training data.
% FIX: the data-generation script saves the combined signal as
% "RC_comb_" + noise + ".mat" with noise = 25, i.e.
% 'RC_comb_25.mat' (the grey-box script loads the same file);
% 'RC_comb_250.mat' is not produced anywhere.
rcdat = load('RC_comb_25.mat');
t = rcdat.data(:,1);         % timestamps
u = rcdat.data(:,2);         % input
y = rcdat.data(:,3) * 1000;  % output, scaled by 1000 (presumably A -> mA; confirm)

% Normalizing input signal to zero mean, unit variance.
mu = mean(u);
sig = std(u);
stdU = (u - mu) / sig;

% Split the long signal into sub-sequences of `seqLength`
% samples; one row per training observation after reshape.
seqLength = 500;
seqU = reshape(stdU, [], seqLength);
seqY = reshape(y, [], seqLength);
XTrain = mat2cell(seqU, ones(1, size(seqU,1)));
YTrain = mat2cell(seqY, ones(1, size(seqY,1)));

% Creating LSTM architecture: four stacked LSTM layers, each
% with a hidden state of `numHiddenUnits` dimensions, followed
% by a linear read-out for sequence-to-sequence regression.
numFeatures = 1;
numHiddenUnits = 16;
numResponses = 1;
layers = [ ...
    sequenceInputLayer(numFeatures)
    lstmLayer(numHiddenUnits,'OutputMode','sequence')
    lstmLayer(numHiddenUnits,'OutputMode','sequence')
    lstmLayer(numHiddenUnits,'OutputMode','sequence')
    lstmLayer(numHiddenUnits,'OutputMode','sequence')
    fullyConnectedLayer(numResponses)
    regressionLayer];

options = trainingOptions('adam', ...
    'MaxEpochs',40, ...
    'MiniBatchSize', 5, ...
    'GradientThreshold',1, ...
    'InitialLearnRate',0.1, ...
    'LearnRateSchedule','piecewise', ...
    'LearnRateDropPeriod',5, ...
    'LearnRateDropFactor',0.7, ...
    'Verbose',0, ...
    'Plots','training-progress');

net = trainNetwork(XTrain, YTrain, layers, options);

% Plot predicted and actual output.
% FIX: predict on the normalized signal (stdU) -- the network
% was trained on standardized sequences, so feeding raw `u`
% gave it inputs on a different scale than it ever saw.
YPred = cell2mat(predict(net, {stdU'}));
plot(t, y, t, YPred);
legend('Actual', 'Prediction');
\ No newline at end of file
% Train an LSTM network that maps a sequence of numbers to
% the sequence of its numerical derivatives.
%
% Input:  1,2,5,3
% Output: 1,1,3,-2

N = 1000;    % training points
NTest = N;   % test points
seqLen = N;  % one sub-sequence spanning all points
range = 100; % values drawn uniformly from [0, range]

% Fixed seed so the drawn sequence is reproducible.
rng(0);
numbers = randi([0, range], 1, N+NTest);
gradients = gradient(numbers);

% First N points form the training split, the remainder the
% test split; reshape each into rows of seqLen numbers.
seq      = transpose(reshape(numbers(1:N), seqLen, []));
seqY     = transpose(reshape(gradients(1:N), seqLen, []));
testSeq  = transpose(reshape(numbers(N+1:end), seqLen, []));
testSeqY = transpose(reshape(gradients(N+1:end), seqLen, []));

% Standardize inputs (zero mean, unit variance) with
% training-set statistics applied to both splits.
mu  = mean(seq(:));
sig = std(seq(:));
stdSeq  = (seq - mu) / sig;
stdTest = (testSeq - mu) / sig;

% trainNetwork expects a cell array with one row vector per
% observation; mat2cell splits the matrices row-wise.
XTrain = mat2cell(stdSeq,  ones(1, N/seqLen));
XTest  = mat2cell(stdTest, ones(1, NTest/seqLen));

% Response format: see
% https://www.mathworks.com/help/deeplearning/ref/trainnetwork.html
YTrain = mat2cell(seqY,     ones(1, N/seqLen));
YTest  = mat2cell(testSeqY, ones(1, NTest/seqLen));

% Two stacked single-unit LSTM layers followed by a linear
% read-out, trained as sequence-to-sequence regression.
layers = [...
    sequenceInputLayer(1)
    lstmLayer(1,'OutputMode','sequence')
    lstmLayer(1,'OutputMode','sequence')
    fullyConnectedLayer(1)
    regressionLayer];

options = trainingOptions('adam', ...
    'MaxEpochs',100, ...
    'MiniBatchSize', 5, ...
    'GradientThreshold',1, ...
    'InitialLearnRate',1, ...
    'LearnRateSchedule','piecewise', ...
    'LearnRateDropPeriod',50, ...
    'LearnRateDropFactor',0.75, ...
    'Verbose',0, ...
    'Plots','training-progress');

net = trainNetwork(XTrain, YTrain, layers, options);

% Compare rounded predictions against the true derivatives.
YPred = int32(cell2mat(predict(net, XTest)));
plot(1:NTest, YPred, 1:NTest, gradients(N+1:end));
legend('Predicted', 'Actual');
\ No newline at end of file
% Estimates parameters of an RC circuit. Produces an
% `idgrey` instance called `sys` representing the
% learned system parameters.
%
% See: https://www.mathworks.com/help/ident/ug/estimating-linear-grey-box-models.html
%
% Initial estimates of circuit parameters.
% The number of parameters should match the order
% of the system. That is, 2 parameters for a first
% order system cannot be solved -- hence capacitance
% is fixed (made not Free) below so only resistance
% is estimated.
par = {'resistance',500; 'capacitance', 1e-3};
% A continuous-time ('c') grey box system whose structure is
% given by the ssRC function below.
sys_init = idgrey(@ssRC, par, 'c');
% Freeze parameter 2 (capacitance) at its initial value.
sys_init.Structure.Parameters(2).Free = false;
% Load simulated data: columns are [time, V_in, I_capacitor].
rcdat = load('RC_comb_25.mat');
t = rcdat.data(:,1); % timestamps
u = rcdat.data(:,2); % input
y = rcdat.data(:,3); % output
data = iddata(y, u, rcdat.resolution);
% Run a grey box estimate of the system using data
sys = greyest(data, sys_init);
fprintf('Parameter value: %.5f\n', getpvec(sys));

% State-space structure of the RC circuit as a function of
% the parameters R and c (third argument is the sample time,
% unused for a continuous-time model).
% NOTE(review): with C = A and D = B the measured output is
% y = -x/(R*c) + u/R. If the intended output is the capacitor
% current I = c*dx/dt, that would instead be C = -1/R,
% D = 1/R scaled appropriately -- verify this structure
% against the Simulink model before trusting the estimate.
function [A, B, C, D] = ssRC(R, c, ~)
A = [-1/(R*c)];
B = [1/R];
C = A;
D = B;
end
\ No newline at end of file
"""
Defines state space class for rough linear system modelling.
"""
import numpy as np
class SS:
    """
    A simple state-space simulator. Takes state-space matrices A, B, C, D and
    simulates system response to provided inputs.

    Args:
    * `A, B, C, D`: `np.ndarray` matrices representing the system. Of shapes:
      * `A`: State variables x State variables,
      * `B`: State variables x Inputs,
      * `C`: Output Variables x State Variables,
      * `D`: Output Variables x Inputs
    * `tstep`: The maximum integration step (resolution) of the simulation.
    """

    def __init__(self, A: np.ndarray, B: np.ndarray, C: np.ndarray, D: np.ndarray,
                 tstep: float = 1e-4):
        self.A = A
        self.B = B
        self.C = C
        self.D = D
        self.tstep = tstep
        # Dimensions inferred from the matrices themselves.
        self.xdim = A.shape[0]  # number of state variables
        self.ydim = C.shape[0]  # number of output variables

    def run(self, u: np.ndarray, t: np.ndarray, x0: np.ndarray = None):
        """
        Generate system response to inputs. The state is integrated with the
        trapezoid rule, sub-stepping each input interval at `self.tstep`.

        Args:
        * `u`: An `N x Inputs` array of inputs,
        * `t`: The end time (int or float) or an array of time stamps for the
          corresponding elements in `u`.
        * `x0`: The initial state of the system. Never modified; a float copy
          is made internally.

        Returns:
        * `Y`: `np.ndarray` of shape `N x Outputs`,
        * `T`: `np.ndarray` of shape `N`
        """
        # Accept a scalar end time. FIX: the original only accepted float, so
        # an integer end time fell through to the array path and crashed.
        if isinstance(t, (int, float)):
            t = np.linspace(0, float(t), len(u), endpoint=False)
        # FIX: copy x0 so the in-place `x += ...` below can no longer mutate
        # the caller's array; forcing float dtype also prevents a failure
        # when an integer-dtype x0 receives fractional updates.
        x = np.zeros(self.xdim) if x0 is None else np.array(x0, dtype=float)
        tprev = 0.0
        dxprev = 0.0  # derivative from the previous sub-step (trapezoid rule)
        T, Y = [], []
        for t_, u_ in zip(t, u):
            Dt = t_ - tprev  # interval to integrate for this input sample
            while True:
                # Never step further than the configured resolution.
                dt = min(Dt, self.tstep)
                dx = self.A @ x + self.B @ u_
                y = self.C @ x + self.D @ u_
                # Trapezoid rule: advance using the average of the current
                # and previous derivative estimates.
                x += 0.5 * dt * (dx + dxprev)
                Dt -= dt
                dxprev = dx
                if Dt <= 0:
                    break
            T.append(t_)
            Y.append(y)
            tprev = t_
        return np.asarray(Y), np.asarray(T)