@Omnitact, the issue description is quite vague. Could you please elaborate on how one can reproduce the error you are seeing, the operating system you are using, which section of code gave the error, etc.?
cc: @jinglescode
How can I add the code and the errors within a comment? I tried to use the <> button, but when I preview it, it doesn't come out the way I want it to.
Hi @Omnitact, sorry I don't get your question. Do you mean to add a code block on GitHub comment?
Yes, I do. How can I add the code? I'd like to show you the errors I am getting.
Hi @Omnitact,
Please add a fenced code block and more information about how to reproduce the issue you're seeing.
Fenced code blocks: you can create fenced code blocks by placing triple backticks ``` before and after the code block. We recommend placing a blank line before and after code blocks to make the raw formatting easier to read.
function test() {
  console.log("notice the blank line before this function?");
}
Here it is. I have attached the whole code of the project. There are three name errors towards the bottom.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from alpha_vantage.timeseries import TimeSeries
print("All libraries loaded")
All libraries loaded
config = {
    "alpha_vantage": {
        "key": "3PROXA1Q6FII2UBT", # Claim your free API key here: https://www.alphavantage.co/support/#api-key
        "symbol": "IBM",
        "outputsize": "full",
        "key_adjusted_close": "5. adjusted close",
    },
    "data": {
        "window_size": 20,
        "train_split_size": 0.80,
    },
    "plots": {
        "xticks_interval": 90, # show a date every 90 days
        "color_actual": "#001f3f",
        "color_train": "#3D9970",
        "color_val": "#0074D9",
        "color_pred_train": "#3D9970",
        "color_pred_val": "#0074D9",
        "color_pred_test": "#FF4136",
    },
    "model": {
        "input_size": 1, # since we are only using 1 feature, close price
        "num_lstm_layers": 2,
        "lstm_size": 32,
        "dropout": 0.2,
    },
    "training": {
        "device": "cpu", # "cuda" or "cpu"
        "batch_size": 64,
        "num_epoch": 100,
        "learning_rate": 0.01,
        "scheduler_step_size": 40,
    }
}
def download_data(config):
    ts = TimeSeries(key=config["alpha_vantage"]["key"])
    data, meta_data = ts.get_daily_adjusted(config["alpha_vantage"]["symbol"], outputsize=config["alpha_vantage"]["outputsize"])

    data_date = [date for date in data.keys()]
    data_date.reverse()

    data_close_price = [float(data[date][config["alpha_vantage"]["key_adjusted_close"]]) for date in data.keys()]
    data_close_price.reverse()
    data_close_price = np.array(data_close_price)

    num_data_points = len(data_date)
    display_date_range = "from " + data_date[0] + " to " + data_date[num_data_points-1]
    print("Number data points", num_data_points, display_date_range)

    return data_date, data_close_price, num_data_points, display_date_range
data_date, data_close_price, num_data_points, display_date_range = download_data(config)
# plot
fig = figure(figsize=(25, 5), dpi=80)
fig.patch.set_facecolor((1.0, 1.0, 1.0))
plt.plot(data_date, data_close_price, color=config["plots"]["color_actual"])
xticks = [data_date[i] if ((i%config["plots"]["xticks_interval"]==0 and (num_data_points-i) > config["plots"]["xticks_interval"]) or i==num_data_points-1) else None for i in range(num_data_points)] # make x ticks nice
x = np.arange(0,len(xticks))
plt.xticks(x, xticks, rotation='vertical')
plt.title("Daily close price for " + config["alpha_vantage"]["symbol"] + ", " + display_date_range)
plt.grid(b=None, which='major', axis='y', linestyle='--')
plt.show()
Number data points 5427 from 1999-11-01 to 2021-05-26
class Normalizer():
    def __init__(self):
        self.mu = None
        self.sd = None

    def fit_transform(self, x):
        self.mu = np.mean(x, axis=(0), keepdims=True)
        self.sd = np.std(x, axis=(0), keepdims=True)
        normalized_x = (x - self.mu)/self.sd
        return normalized_x

    def inverse_transform(self, x):
        return (x*self.sd) + self.mu
# normalize
scaler = Normalizer()
normalized_data_close_price = scaler.fit_transform(data_close_price)
def prepare_data_x(x, window_size):
    # perform windowing
    n_row = x.shape[0] - window_size + 1
    output = np.lib.stride_tricks.as_strided(x, shape=(n_row, window_size), strides=(x.strides[0], x.strides[0]))
    return output[:-1], output[-1]

def prepare_data_y(x, window_size):
    # # perform simple moving average
    # output = np.convolve(x, np.ones(window_size), 'valid') / window_size

    # use the next day as label
    output = x[window_size:]
    return output
data_x, data_x_unseen = prepare_data_x(normalized_data_close_price, window_size=config["data"]["window_size"])
data_y = prepare_data_y(normalized_data_close_price, window_size=config["data"]["window_size"])
# split dataset
split_index = int(data_y.shape[0]*config["data"]["train_split_size"])
data_x_train = data_x[:split_index]
data_x_val = data_x[split_index:]
data_y_train = data_y[:split_index]
data_y_val = data_y[split_index:]
# prepare data for plotting
to_plot_data_y_train = np.zeros(num_data_points)
to_plot_data_y_val = np.zeros(num_data_points)
to_plot_data_y_train[config["data"]["window_size"]:split_index+config["data"]["window_size"]] = scaler.inverse_transform(data_y_train)
to_plot_data_y_val[split_index+config["data"]["window_size"]:] = scaler.inverse_transform(data_y_val)
to_plot_data_y_train = np.where(to_plot_data_y_train == 0, None, to_plot_data_y_train)
to_plot_data_y_val = np.where(to_plot_data_y_val == 0, None, to_plot_data_y_val)
## plots
fig = figure(figsize=(25, 5), dpi=80)
fig.patch.set_facecolor((1.0, 1.0, 1.0))
plt.plot(data_date, to_plot_data_y_train, label="Prices (train)", color=config["plots"]["color_train"])
plt.plot(data_date, to_plot_data_y_val, label="Prices (validation)", color=config["plots"]["color_val"])
xticks = [data_date[i] if ((i%config["plots"]["xticks_interval"]==0 and (num_data_points-i) > config["plots"]["xticks_interval"]) or i==num_data_points-1) else None for i in range(num_data_points)] # make x ticks nice
x = np.arange(0,len(xticks))
plt.xticks(x, xticks, rotation='vertical')
plt.title("Daily close prices for " + config["alpha_vantage"]["symbol"] + " - showing training and validation data")
plt.grid(b=None, which='major', axis='y', linestyle='--')
plt.legend()
plt.show()
class TimeSeriesDataset(Dataset):
    def __init__(self, x, y):
        x = np.expand_dims(x, 2) # in our case, we have only 1 feature, so we need to convert `x` into [batch, sequence, features] for LSTM
        self.x = x.astype(np.float32)
        self.y = y.astype(np.float32)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return (self.x[idx], self.y[idx])
dataset_train = TimeSeriesDataset(data_x_train, data_y_train)
dataset_val = TimeSeriesDataset(data_x_val, data_y_val)
print("Train data shape", dataset_train.x.shape, dataset_train.y.shape)
print("Validation data shape", dataset_val.x.shape, dataset_val.y.shape)
train_dataloader = DataLoader(dataset_train, batch_size=config["training"]["batch_size"], shuffle=True)
val_dataloader = DataLoader(dataset_val, batch_size=config["training"]["batch_size"], shuffle=True)
Train data shape (4325, 20, 1) (4325,)
Validation data shape (1082, 20, 1) (1082,)
class LSTMModel(nn.Module):
    def __init__(self, input_size=1, hidden_layer_size=32, num_layers=2, output_size=1, dropout=0.2):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size

        self.linear_1 = nn.Linear(input_size, hidden_layer_size)
        self.relu = nn.ReLU()
        self.lstm = nn.LSTM(hidden_layer_size, hidden_size=self.hidden_layer_size, num_layers=num_layers, batch_first=True)
        self.dropout = nn.Dropout(dropout)
        self.linear_2 = nn.Linear(num_layers*hidden_layer_size, output_size)

        self.init_weights()

    def init_weights(self):
        for name, param in self.lstm.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight_ih' in name:
                nn.init.kaiming_normal_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)

    def forward(self, x):
        batchsize = x.shape[0]

        # layer 1
        x = self.linear_1(x)
        x = self.relu(x)

        # LSTM layer
        lstm_out, (h_n, c_n) = self.lstm(x)

        # reshape output from hidden cell into [batch, features] for `linear_2`
        x = h_n.permute(1, 0, 2).reshape(batchsize, -1)

        # layer 2
        x = self.dropout(x)
        predictions = self.linear_2(x)
        return predictions[:,-1]
def run_epoch(dataloader, is_training=False):
    epoch_loss = 0

    if is_training:
        model.train()
    else:
        model.eval()

    for idx, (x, y) in enumerate(dataloader):
        if is_training:
            optimizer.zero_grad()

        batchsize = x.shape[0]

        x = x.to(config["training"]["device"])
        y = y.to(config["training"]["device"])

        out = model(x)
        loss = criterion(out.contiguous(), y.contiguous())

        if is_training:
            loss.backward()
            optimizer.step()

        epoch_loss += (loss.detach().item() / batchsize)

    lr = scheduler.get_last_lr()[0]

    return epoch_loss, lr
train_dataloader = DataLoader(dataset_train, batch_size=config["training"]["batch_size"], shuffle=True)
val_dataloader = DataLoader(dataset_val, batch_size=config["training"]["batch_size"], shuffle=True)
model = LSTMModel(input_size=config["model"]["input_size"], hidden_layer_size=config["model"]["lstm_size"], num_layers=config["model"]["num_lstm_layers"], output_size=1, dropout=config["model"]["dropout"])
model = model.to(config["training"]["device"])
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=config["training"]["learning_rate"], betas=(0.9, 0.98), eps=1e-9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=config["training"]["scheduler_step_size"], gamma=0.1)
for epoch in range(config["training"]["num_epoch"]):
    loss_train, lr_train = run_epoch(train_dataloader, is_training=True)
    loss_val, lr_val = run_epoch(val_dataloader)
    scheduler.step()

    print('Epoch[{}/{}] | loss train:{:.6f}, test:{:.6f} | lr:{:.6f}'
          .format(epoch+1, config["training"]["num_epoch"], loss_train, loss_val, lr_train))
Python 3.8.8 (default, Apr 13 2021, 12:59:45)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.22.0 -- An enhanced Interactive Python. Type '?' for help.
# here we re-initialize dataloader so the data doesn't shuffled, so we can plot the values by date
train_dataloader = DataLoader(dataset_train, batch_size=config["training"]["batch_size"], shuffle=False)
val_dataloader = DataLoader(dataset_val, batch_size=config["training"]["batch_size"], shuffle=False)
model.eval()
# predict on the training data, to see how well the model managed to learn and memorize
predicted_train = np.array([])
for idx, (x, y) in enumerate(train_dataloader):
    x = x.to(config["training"]["device"])
    out = model(x)
    out = out.cpu().detach().numpy()
    predicted_train = np.concatenate((predicted_train, out))
# predict on the validation data, to see how the model does
predicted_val = np.array([])
for idx, (x, y) in enumerate(val_dataloader):
    x = x.to(config["training"]["device"])
    out = model(x)
    out = out.cpu().detach().numpy()
    predicted_val = np.concatenate((predicted_val, out))
# prepare data for plotting
to_plot_data_y_train_pred = np.zeros(num_data_points)
to_plot_data_y_val_pred = np.zeros(num_data_points)
to_plot_data_y_train_pred[config["data"]["window_size"]:split_index+config["data"]["window_size"]] = scaler.inverse_transform(predicted_train)
to_plot_data_y_val_pred[split_index+config["data"]["window_size"]:] = scaler.inverse_transform(predicted_val)
to_plot_data_y_train_pred = np.where(to_plot_data_y_train_pred == 0, None, to_plot_data_y_train_pred)
to_plot_data_y_val_pred = np.where(to_plot_data_y_val_pred == 0, None, to_plot_data_y_val_pred)
# plots
fig = figure(figsize=(25, 5), dpi=80)
fig.patch.set_facecolor((1.0, 1.0, 1.0))
plt.plot(data_date, data_close_price, label="Actual prices", color=config["plots"]["color_actual"])
plt.plot(data_date, to_plot_data_y_train_pred, label="Predicted prices (train)", color=config["plots"]["color_pred_train"])
plt.plot(data_date, to_plot_data_y_val_pred, label="Predicted prices (validation)", color=config["plots"]["color_pred_val"])
plt.title("Compare predicted prices to actual prices")
xticks = [data_date[i] if ((i%config["plots"]["xticks_interval"]==0 and (num_data_points-i) > config["plots"]["xticks_interval"]) or i==num_data_points-1) else None for i in range(num_data_points)] # make x ticks nice
x = np.arange(0,len(xticks))
plt.xticks(x, xticks, rotation='vertical')
plt.grid(b=None, which='major', axis='y', linestyle='--')
plt.legend()
plt.show()
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-1-265a5e8f429f> in <module>
1 # here we re-initialize dataloader so the data doesn't shuffled, so we can plot the values by date
2
----> 3 train_dataloader = DataLoader(dataset_train, batch_size=config["training"]["batch_size"], shuffle=False)
4 val_dataloader = DataLoader(dataset_val, batch_size=config["training"]["batch_size"], shuffle=False)
5
NameError: name 'DataLoader' is not defined
# prepare data for plotting the zoomed in view of the predicted prices (on validation set) vs. actual prices
to_plot_data_y_val_subset = scaler.inverse_transform(data_y_val)
to_plot_predicted_val = scaler.inverse_transform(predicted_val)
to_plot_data_date = data_date[split_index+config["data"]["window_size"]:]
# plots
fig = figure(figsize=(25, 5), dpi=80)
fig.patch.set_facecolor((1.0, 1.0, 1.0))
plt.plot(to_plot_data_date, to_plot_data_y_val_subset, label="Actual prices", color=config["plots"]["color_actual"])
plt.plot(to_plot_data_date, to_plot_predicted_val, label="Predicted prices (validation)", color=config["plots"]["color_pred_val"])
plt.title("Zoom in to examine predicted price on validation data portion")
xticks = [to_plot_data_date[i] if ((i%int(config["plots"]["xticks_interval"]/5)==0 and (len(to_plot_data_date)-i) > config["plots"]["xticks_interval"]/6) or i==len(to_plot_data_date)-1) else None for i in range(len(to_plot_data_date))] # make x ticks nice
xs = np.arange(0,len(xticks))
plt.xticks(xs, xticks, rotation='vertical')
plt.grid(b=None, which='major', axis='y', linestyle='--')
plt.legend()
plt.show()
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-2-4a87dd546a55> in <module>
1 # prepare data for plotting the zoomed in view of the predicted prices (on validation set) vs. actual prices
2
----> 3 to_plot_data_y_val_subset = scaler.inverse_transform(data_y_val)
4 to_plot_predicted_val = scaler.inverse_transform(predicted_val)
5 to_plot_data_date = data_date[split_index+config["data"]["window_size"]:]
NameError: name 'scaler' is not defined
# predict the closing price of the next trading day
model.eval()
x = torch.tensor(data_x_unseen).float().to(config["training"]["device"]).unsqueeze(0).unsqueeze(2) # this is the data type and shape required, [batch, sequence, feature]
prediction = model(x)
prediction = prediction.cpu().detach().numpy()
# prepare plots
plot_range = 10
to_plot_data_y_val = np.zeros(plot_range)
to_plot_data_y_val_pred = np.zeros(plot_range)
to_plot_data_y_test_pred = np.zeros(plot_range)
to_plot_data_y_val[:plot_range-1] = scaler.inverse_transform(data_y_val)[-plot_range+1:]
to_plot_data_y_val_pred[:plot_range-1] = scaler.inverse_transform(predicted_val)[-plot_range+1:]
to_plot_data_y_test_pred[plot_range-1] = scaler.inverse_transform(prediction)
to_plot_data_y_val = np.where(to_plot_data_y_val == 0, None, to_plot_data_y_val)
to_plot_data_y_val_pred = np.where(to_plot_data_y_val_pred == 0, None, to_plot_data_y_val_pred)
to_plot_data_y_test_pred = np.where(to_plot_data_y_test_pred == 0, None, to_plot_data_y_test_pred)
# plot
plot_date_test = data_date[-plot_range+1:]
plot_date_test.append("tomorrow")
fig = figure(figsize=(25, 5), dpi=80)
fig.patch.set_facecolor((1.0, 1.0, 1.0))
plt.plot(plot_date_test, to_plot_data_y_val, label="Actual prices", marker=".", markersize=10, color=config["plots"]["color_actual"])
plt.plot(plot_date_test, to_plot_data_y_val_pred, label="Past predicted prices", marker=".", markersize=10, color=config["plots"]["color_pred_val"])
plt.plot(plot_date_test, to_plot_data_y_test_pred, label="Predicted price for next day", marker=".", markersize=20, color=config["plots"]["color_pred_test"])
plt.title("Predicted close price of the next trading day")
plt.grid(b=None, which='major', axis='y', linestyle='--')
plt.legend()
plt.show()
print("Predicted close price of the next trading day:", round(to_plot_data_y_test_pred[plot_range-1], 2))
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-3-a2fc0fe423aa> in <module>
1 # predict the closing price of the next trading day
2
----> 3 model.eval()
4
5 x = torch.tensor(data_x_unseen).float().to(config["training"]["device"]).unsqueeze(0).unsqueeze(2) # this is the data type and shape required, [batch, sequence, feature]
NameError: name 'model' is not defined
`NameError: name 'DataLoader' is not defined` comes from `from torch.utils.data import DataLoader`.
`NameError: name 'scaler' is not defined` comes from `scaler = Normalizer()`.
`NameError: name 'model' is not defined` comes from `model = LSTMModel(input_size=config["model"]["input_size"], hidden_layer_size=config["model"]["lstm_size"], num_layers=config["model"]["num_lstm_layers"], output_size=1, dropout=config["model"]["dropout"])`.
All of these are defined in the code you provided. I would suggest you run and reference this notebook too to cross-check.
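For reference, here is a minimal sketch of the cells those names come from; it only restates definitions already present in the code you pasted, and they need to be executed in the same kernel session before the plotting and prediction cells:

```python
# run these cells (from the code above) in the same session before the later cells
from torch.utils.data import DataLoader   # defines DataLoader

scaler = Normalizer()                      # defines scaler
normalized_data_close_price = scaler.fit_transform(data_close_price)

model = LSTMModel(input_size=config["model"]["input_size"],
                  hidden_layer_size=config["model"]["lstm_size"],
                  num_layers=config["model"]["num_lstm_layers"],
                  output_size=1,
                  dropout=config["model"]["dropout"])    # defines model
model = model.to(config["training"]["device"])
```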
@jinglescode Is there a way to change the date it is forecasting? At the moment it uses yesterday's close to predict today's close price. Is there a way I can change it so it predicts next week's price, the end of this week's price, next month's, etc.?
There are many things you can try. One way is to use TIME_SERIES_WEEKLY_ADJUSTED or TIME_SERIES_MONTHLY_ADJUSTED, so each step in the series is a week or a month instead of a day.
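As a rough sketch, a weekly variant of `download_data` could look like the following, assuming the alpha_vantage client's `get_weekly_adjusted` method (which maps to TIME_SERIES_WEEKLY_ADJUSTED) and that the adjusted-close key is still "5. adjusted close" — worth verifying against the returned dict:

```python
def download_weekly_data(config):
    # hypothetical weekly variant of download_data: one bar per week,
    # so predicting the next step means predicting next week's close
    ts = TimeSeries(key=config["alpha_vantage"]["key"])
    data, meta_data = ts.get_weekly_adjusted(config["alpha_vantage"]["symbol"])

    data_date = list(data.keys())
    data_date.reverse()

    data_close_price = [float(data[date][config["alpha_vantage"]["key_adjusted_close"]]) for date in data.keys()]
    data_close_price.reverse()
    data_close_price = np.array(data_close_price)

    num_data_points = len(data_date)
    display_date_range = "from " + data_date[0] + " to " + data_date[num_data_points-1]
    return data_date, data_close_price, num_data_points, display_date_range
```

With weekly bars, the existing `window_size` of 20 would mean a 20-week lookback and a one-week-ahead prediction, so you may want to retune `window_size` as well.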
I am getting some name errors. I have attached a Jupyter file which contains all my code: Machine Learning Stock Forecasting.jupyterlab-workspace.zip