author    RohitRathore1 <rohitrathore.imh55@gmail.com> 2022-05-19 17:29:04 +0530
committer RohitRathore1 <rohitrathore.imh55@gmail.com> 2022-06-09 21:22:59 +0530
commit    0aec5cd837af4503605707a560687a2b5b106fd4 (patch)
tree      caa5c3ce134e5f1792e6fe71d718eb547ff9299d
parent    4803c3eb0d2cf81ef29b25659ed27202d2456ee7 (diff)
Python_Code: Added python code after running pylint

This patch contains the required Python code. Removed vims_visulization and a
duplicate of lstm_attention.

Signed-off-by: Rohit Singh Rathaur <rohitrathore.imh55@gmail.com>
Change-Id: I4a9b70a186498b24ba258a6ac303c827d18a4765
-rw-r--r--  models/failure_prediction/python/attention_lstm.py                   471
-rw-r--r--  models/failure_prediction/python/bi_lstmstacked_lstm_correlation.py  449
-rw-r--r--  models/failure_prediction/python/cnn.py                              376
-rw-r--r--  models/failure_prediction/python/decision_tree.py                    199
-rw-r--r--  models/failure_prediction/python/featurecreation.py                  114
-rw-r--r--  models/failure_prediction/python/lstm.py                             391
-rw-r--r--  models/failure_prediction/python/lstm_correlation.py                 379
-rw-r--r--  models/failure_prediction/python/stacked_lstm_correlation.py         405
8 files changed, 2784 insertions(+), 0 deletions(-)
diff --git a/models/failure_prediction/python/attention_lstm.py b/models/failure_prediction/python/attention_lstm.py
new file mode 100644
index 0000000..612ff84
--- /dev/null
+++ b/models/failure_prediction/python/attention_lstm.py
@@ -0,0 +1,471 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102
+# -*- coding: utf-8 -*-
+"""Attention_LSTM.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/1fXlRsp5_7EmuJBdayJTd2ChTWs8CTXIp
+
+Contributors: **Rohit Singh Rathaur, Girish L.**
+
+Copyright [2021](2021) [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from keras.utils.vis_utils import plot_model
+from keras import backend as K
+import seaborn as sns
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/content/drive')
+
+# Importing libraries
+
+df_Ellis = pd.read_csv(
+ "/content/drive/MyDrive/LFN Anuket/Analysis/data/Final/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis
+
+df_Ellis.plot()
+
+"""We showed here the histograms of Ellis data"""
+
+# we show here the hist
+df_Ellis.hist(bins=100, figsize=(20, 15))
+# save_fig("attribute_histogram_plots")
+plt.show()
+
+cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']]
+cpu_system_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']]
+load_avg_1_min.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+"""## Identifying trends in Time Series data
+There are several ways to think about identifying trends in time series. One popular way
+is by taking a rolling average, which means that, for each time point, we take the average
+of the points on either side of it. Note that the number of points is specified by a window
+size, which we need to choose.
+
+What happens then because we take the average is it tends to smooth out noise and seasonality.
+We will see that below right now. Check out this rolling average of `'ellis-cpu.wait_perc'`
+using the built-in `pandas` methods.
+
+When it comes to determining the window size, here, it makes sense to first try out one of
+twelve months, as we're talking about yearly seasonality.
+
+Note that in the code chunk above we used two sets of squared brackets to extract the
+`'ellis-cpu.wait_perc'` column as a DataFrame; If we would have used one set, like
+`df_Ellis['ellis-cpu.wait_perc']`, we would have created a pandas Series.
+
+In the code chunk above, you also chained methods: you called methods on an object one after
+another. Method chaining is pretty popular and pandas is one of the packages that really
+allows you to use that style of programming to the max!
+"""
+
+cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']]
+cpu_wait_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Year', fontsize=30)
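+
+"""A minimal, self-contained sketch (not part of the original notebook) of the
+rolling-average idea on synthetic data, assuming only the `pandas` and `numpy`
+imports above: a noisy series is smoothed with `.rolling(12).mean()`, and the
+double square brackets keep the result a DataFrame rather than a Series."""
+
+toy = pd.DataFrame({'signal': np.sin(np.linspace(0, 12, 300)) +
+                    np.random.normal(0, 0.3, 300)})
+toy_smooth = toy[['signal']].rolling(12).mean()  # DataFrame in, DataFrame out
+print(type(toy['signal']), type(toy_smooth))     # Series vs. DataFrame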
+
+"""We have successfully removed the seasonality and we saw an upward trend for
+`ellis-cpu.wait_perc`! But how do these two search terms compare?
+
+We can figure this out by plotting the trends of `'ellis-cpu.wait_perc'`, `cpu_system_perc`
+and `'load_avg_1_min'` on a single figure:
+"""
+
+df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(
+ 12).mean(), cpu_wait_perc.rolling(12).mean()], axis=1)
+df_dg.plot(figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Year', fontsize=20)
+
+"""We established the correlation matrix for Ellis data.
+Seaborn has five built-in themes to style its plots: `darkgrid`, `whitegrid`, `dark`,
+`white`, and `ticks`. Seaborn defaults to using the darkgrid theme for its plots, but
+we can change this styling to better suit our presentation needs.
+
+To use any of the preset themes pass the name of it to `sns.set_style()`.
+"""
+
+# we establish the correlation matrix
+color = sns.color_palette()
+sns.set_style('darkgrid')
+
+correlation_matrix = df_Ellis.corr()
+f, ax = plt.subplots(figsize=(20, 10))
+sns.heatmap(
+    correlation_matrix,
+ cbar=True,
+ vmin=0,
+ vmax=1,
+ square=True,
+ annot=True)
+plt.show()
+
+"""Correlation between rows or columns of two DataFrame objectsCompute pairwise"""
+
+df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])
+
+# using multivariate feature
+
+features_3 = [
+ 'ellis-cpu.wait_perc',
+ 'ellis-load.avg_1_min',
+ 'ellis-net.in_bytes_sec',
+ 'Label']
+
+features = df_Ellis[features_3]
+features.index = df_Ellis['Timestamp']
+features.head()
+
+features.plot(subplots=True)
+
+features = features.values
+
+"""train test split for simple time series moving window average"""
+
+# standardize data
+train_split = 141600
+tf.random.set_seed(13)
+
+# standardize data
+features_mean = features[:train_split].mean()
+features_std = features[:train_split].std()
+features = (features - features_mean) / features_std
+
+print(type(features))
+print(features.shape)
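+
+"""Note: `features` is a 2-D NumPy array at this point, so `.mean()` and `.std()`
+above return single scalars computed over all columns. A per-column
+standardization (a common alternative, sketched here as an assumption rather
+than the notebook's method) would pass `axis=0`; it is left commented out so
+the script's behavior is unchanged:"""
+
+# col_mean = features[:train_split].mean(axis=0)  # per-column training mean
+# col_std = features[:train_split].std(axis=0)    # per-column training std
+# features = (features - col_mean) / col_std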
+
+"""Defined a `multivariate_data` function to generate multivariate data"""
+
+# create multivariate data
+
+
+def multivariate_data(
+ features,
+ target,
+ start_idx,
+ end_idx,
+ history_size,
+ target_size,
+ step,
+ single_step=False):
+ data = []
+ labels = []
+ start_idx = start_idx + history_size
+ if end_idx is None:
+ end_idx = len(features) - target_size
+ for i in range(start_idx, end_idx):
+ idxs = range(i - history_size, i, step) # using step
+ data.append(features[idxs])
+ if single_step:
+ labels.append(target[i + target_size])
+ else:
+ labels.append(target[i:i + target_size])
+
+ return np.array(data), np.array(labels)
+
+
+"""Generate multivariate data using a defined function `multivariate_data`"""
+
+# generate multivariate data
+
+history = 720
+future_target = 72
+STEP = 6
+
+x_train_ss, y_train_ss = multivariate_data(
+ features, features[:, 1], 0, train_split, history, future_target, STEP, single_step=True)
+
+x_val_ss, y_val_ss = multivariate_data(features, features[:, 1], train_split, None, history,
+ future_target, STEP, single_step=True)
+
+print(x_train_ss.shape, y_train_ss.shape)
+print(x_val_ss.shape, y_val_ss.shape)
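+
+"""A tiny worked example (made-up numbers, our addition) to make the windowing
+arithmetic concrete: with `history_size=6`, `step=2`, `target_size=2` and
+`single_step=True`, each sample holds 3 past rows and its label is the target
+value 2 rows ahead."""
+
+toy_feats = np.arange(40).reshape(20, 2)
+toy_x, toy_y = multivariate_data(
+    toy_feats, toy_feats[:, 1], 0, None, 6, 2, 2, single_step=True)
+print(toy_x.shape, toy_y.shape)  # (12, 3, 2) (12,)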
+
+"""
+The `tf.data.Dataset` API supports writing descriptive and efficient input pipelines.
+Dataset usage follows a common pattern:
+- Create a source dataset from our input data.
+- Apply dataset transformations to preprocess the data.
+- Iterate over the dataset and process the elements.
+Note: Iteration happens in a streaming fashion, so the full dataset does not need to
+fit into memory.
+Once we have a dataset, we can apply transformations to prepare the data for our model:
+"""
+
+# tensorflow dataset
+batch_size = 256
+buffer_size = 10000
+
+train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))
+train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))
+val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+print(train_ss)
+print(val_ss)
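+
+"""A quick sanity check (our addition, assuming TF2 eager execution): pull a
+single batch from the pipeline and print its shapes -- 256 windows of 120
+timesteps over 4 features, with one scalar target per window."""
+
+for x_batch, y_batch in train_ss.take(1):
+    print(x_batch.shape, y_batch.shape)  # (256, 120, 4) (256,)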
+
+x_train_ss.shape[-2:]
+
+"""We used a custom loss function to evaluate the model:"""
+
+
+def root_mean_squared_error(y_true, y_pred):
+ return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
+
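+"""A small sanity check of the custom RMSE on known values (our addition,
+using the Keras backend `K` imported above): for predictions [1, 2] against
+targets [0, 0], RMSE = sqrt((1 + 4) / 2) ≈ 1.581."""
+
+print(K.eval(root_mean_squared_error(
+    tf.constant([0.0, 0.0]), tf.constant([1.0, 2.0]))))
+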
+# Modelling using LSTM
+steps = 50
+
+EPOCHS = 20
+
+single_step_model = tf.keras.models.Sequential()
+
+single_step_model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_ss.shape[-2:])))
+# single_step_model.add(tf.keras.layers.Dropout(0.3))
+single_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+# single_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+single_step_model.add(tf.keras.layers.Activation("relu"))
+single_step_model.add(tf.keras.layers.Dense(1))
+single_step_model.compile(
+ optimizer=tf.keras.optimizers.Adam(),
+ loss='mae',
+ metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+#single_step_model.compile(loss='mse', optimizer='rmsprop')
+
+single_step_model_history = single_step_model.fit(
+ train_ss,
+ epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_ss,
+ validation_steps=50)
+
+plot_model(
+ single_step_model,
+ to_file='/content/drive/MyDrive/LFN Anuket/Analysis/data/Final/Bi-LSTM.png',
+ show_shapes=True,
+ show_layer_names=True)
+single_step_model.summary()
+
+
+# plot train test loss
+
+def plot_loss(history, title):
+ loss = history.history['loss']
+ val_loss = history.history['val_loss']
+
+ epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train Loss')
+ plt.plot(epochs, val_loss, 'r', label='Validation Loss')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# plot train test loss
+
+
+def plot_loss(history, title):
+ loss = history.history['rmse']
+ val_loss = history.history['val_rmse']
+
+ epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train RMSE')
+ plt.plot(epochs, val_loss, 'r', label='Validation RMSE')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# function to create time steps
+
+
+def create_time_steps(length):
+ return list(range(-length, 0))
+
+# function to plot time series data
+
+
+def plot_time_series(plot_data, delta, title):
+ labels = ["History", 'True Future', 'Model Predcited']
+ marker = ['.-', 'rx', 'go']
+ time_steps = create_time_steps(plot_data[0].shape[0])
+
+ if delta:
+ future = delta
+ else:
+ future = 0
+ plt.title(title)
+ for i, x in enumerate(plot_data):
+ if i:
+ plt.plot(
+ future,
+ plot_data[i],
+ marker[i],
+ markersize=10,
+ label=labels[i])
+ else:
+ plt.plot(
+ time_steps,
+ plot_data[i].flatten(),
+ marker[i],
+ label=labels[i])
+ plt.legend()
+ plt.xlim([time_steps[0], (future + 5) * 2])
+
+ plt.xlabel('Time_Step')
+ return plt
+
+# Moving window average
+
+
+def MWA(history):
+ return np.mean(history)
+
+# plot time series and predicted values
+
+
+for x, y in val_ss.take(5):
+ plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),
+ single_step_model.predict(x)[0]], 12,
+ 'Single Step Prediction')
+ plot.show()
+
+"""# **MultiStep Forcasting**"""
+
+future_target = 72 # 72 future values
+x_train_multi, y_train_multi = multivariate_data(features, features[:, 1], 0,
+ train_split, history,
+ future_target, STEP)
+x_val_multi, y_val_multi = multivariate_data(features, features[:, 1],
+ train_split, None, history,
+ future_target, STEP)
+
+print(x_train_multi.shape)
+print(y_train_multi.shape)
+
+# TF DATASET
+
+train_data_multi = tf.data.Dataset.from_tensor_slices(
+ (x_train_multi, y_train_multi))
+train_data_multi = train_data_multi.cache().shuffle(
+ buffer_size).batch(batch_size).repeat()
+
+val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
+val_data_multi = val_data_multi.batch(batch_size).repeat()
+
+print(train_data_multi)
+print(val_data_multi)
+
+# plotting function
+
+
+def multi_step_plot(history, true_future, prediction):
+ plt.figure(figsize=(12, 6))
+ num_in = create_time_steps(len(history))
+ num_out = len(true_future)
+ plt.grid()
+ plt.plot(num_in, np.array(history[:, 1]), label='History')
+ plt.plot(np.arange(num_out) / STEP, np.array(true_future), 'bo',
+ label='True Future')
+ if prediction.any():
+ plt.plot(np.arange(num_out) / STEP, np.array(prediction), 'ro',
+ label='Predicted Future')
+ plt.legend(loc='upper left')
+ plt.show()
+
+
+for x, y in train_data_multi.take(1):
+ multi_step_plot(x[0], y[0], np.array([0]))
+
+"""Bi-directional LSTM:
+On some sequence prediction problems, it can be beneficial to allow the LSTM model to
+learn the input sequence both forward and backwards and concatenate both interpretations.
+This is known as bidirectional.
+
+Here, `tf.keras.layers.Bidirectional` is a bidirectional wrapper for RNNs which inherits
+from `Wrapper`, `Layer`, and `module`
+"""
+
+multi_step_model = tf.keras.models.Sequential()
+multi_step_model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_multi.shape[-2:])))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+multi_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+multi_step_model.add(tf.keras.layers.Activation("relu"))
+# add dropout layer (0.3)
+multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs
+
+multi_step_model.compile(
+ optimizer=tf.keras.optimizers.RMSprop(
+ clipvalue=1.0), loss='mae', metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+
+MULTI_STEP_HISTORY = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_data_multi,
+ validation_steps=50)
+
+plot_loss(MULTI_STEP_HISTORY, 'Multi-Step Training and validation loss')
+
+for x, y in val_data_multi.take(5):
+ multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
+
+scores = multi_step_model.evaluate(
+ x_train_multi,
+ y_train_multi,
+ verbose=1,
+ batch_size=200)
+print('RMSE: {}'.format(scores[1]))
+
+SCORES_TEST = multi_step_model.evaluate(
+    x_val_multi, y_val_multi, verbose=1, batch_size=200)
+print('RMSE: {}'.format(SCORES_TEST[1]))
+
+Y_PRED_TEST = multi_step_model.predict(x_val_multi, verbose=0)
+
+plt.figure(figsize=(10, 5))
+plt.plot(Y_PRED_TEST, label='Predicted')
+plt.plot(y_val_multi, label='Actual')
+plt.ylabel("Value")
+plt.xlabel("Timestamp")
+plt.legend(loc='upper left')
+plt.show()
diff --git a/models/failure_prediction/python/bi_lstmstacked_lstm_correlation.py b/models/failure_prediction/python/bi_lstmstacked_lstm_correlation.py
new file mode 100644
index 0000000..a76f4d6
--- /dev/null
+++ b/models/failure_prediction/python/bi_lstmstacked_lstm_correlation.py
@@ -0,0 +1,449 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411
+# -*- coding: utf-8 -*-
+"""Bi_LSTMstacked_LSTM_Correlation.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/1lwBt4E8mHUhRTWK94Y0KsUZ1jHgU1ePq
+
+Contributors: **Rohit Singh Rathaur, Girish L.**
+
+Copyright [2021](2021) [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from keras import backend as K
+from keras.utils.vis_utils import plot_model
+import seaborn as sns
+import os
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/content/drive')
+
+# Importing libraries
+
+df_Ellis = pd.read_csv(
+ "/content/drive/MyDrive/LFN Anuket/Analysis/data/Final/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis
+
+df_Ellis.plot()
+
+"""We showed here the histograms of Ellis data"""
+
+# we show here the hist
+df_Ellis.hist(bins=100, figsize=(20, 15))
+# save_fig("attribute_histogram_plots")
+plt.show()
+
+cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']]
+cpu_system_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']]
+load_avg_1_min.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+"""## Identifying trends in Time Series data
+There are several ways to think about identifying trends in time series. One popular way is by taking a rolling average, which means that, for each time point, we take the average of the points on either side of it. Note that the number of points is specified by a window size, which we need to choose.
+
+What happens then because we take the average is it tends to smooth out noise and seasonality. We will see that below right now. Check out this rolling average of `'ellis-cpu.wait_perc'` using the built-in `pandas` methods.
+
+When it comes to determining the window size, here, it makes sense to first try out one of twelve months, as we're talking about yearly seasonality.
+
+Note that in the code chunk above we used two sets of squared brackets to extract the `'ellis-cpu.wait_perc'` column as a DataFrame; If we would have used one set, like `df_Ellis['ellis-cpu.wait_perc']`, we would have created a pandas Series.
+
+In the code chunk above, you also chained methods: you called methods on an object one after another. Method chaining is pretty popular and pandas is one of the packages that really allows you to use that style of programming to the max!
+"""
+
+cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']]
+cpu_wait_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Year', fontsize=30)
+
+"""We have successfully removed the seasonality and we saw an upward trend for `ellis-cpu.wait_perc`! But how do these two search terms compare?
+
+We can figure this out by plotting the trends of `'ellis-cpu.wait_perc'`, `cpu_system_perc` and `'load_avg_1_min'` on a single figure:
+"""
+
+df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(
+ 12).mean(), cpu_wait_perc.rolling(12).mean()], axis=1)
+df_dg.plot(figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Year', fontsize=20)
+
+"""We established the correlation matrix for Ellis data.
+Seaborn has five built-in themes to style its plots: `darkgrid`, `whitegrid`, `dark`, `white`, and `ticks`. Seaborn defaults to using the darkgrid theme for its plots, but we can change this styling to better suit our presentation needs.
+
+To use any of the preset themes pass the name of it to `sns.set_style()`.
+"""
+
+# we establish the correlation matrix
+color = sns.color_palette()
+sns.set_style('darkgrid')
+
+correlation_matrix = df_Ellis.corr()
+f, ax = plt.subplots(figsize=(20, 10))
+sns.heatmap(
+    correlation_matrix,
+ cbar=True,
+ vmin=0,
+ vmax=1,
+ square=True,
+ annot=True)
+plt.show()
+
+"""Correlation between rows or columns of two DataFrame objectsCompute pairwise"""
+
+df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])
+
+# using multivariate feature
+
+features_3 = [
+ 'ellis-cpu.wait_perc',
+ 'ellis-load.avg_1_min',
+ 'ellis-net.in_bytes_sec',
+ 'Label']
+
+features = df_Ellis[features_3]
+features.index = df_Ellis['Timestamp']
+features.head()
+
+features.plot(subplots=True)
+
+features = features.values
+
+"""train test split for simple time series moving window average"""
+
+# standardize data
+train_split = 141600
+tf.random.set_seed(13)
+
+# standardize data
+features_mean = features[:train_split].mean()
+features_std = features[:train_split].std()
+features = (features - features_mean) / features_std
+
+print(type(features))
+print(features.shape)
+
+"""Created multivariate data"""
+
+# create multivariate data
+
+
+def multivariate_data(
+ features,
+ target,
+ start_idx,
+ end_idx,
+ history_size,
+ target_size,
+ step,
+ single_step=False):
+ data = []
+ labels = []
+ start_idx = start_idx + history_size
+ if end_idx is None:
+ end_idx = len(features) - target_size
+ for i in range(start_idx, end_idx):
+ idxs = range(i - history_size, i, step) # using step
+ data.append(features[idxs])
+ if single_step:
+ labels.append(target[i + target_size])
+ else:
+ labels.append(target[i:i + target_size])
+
+ return np.array(data), np.array(labels)
+
+# generate multivariate data
+
+
+history = 720
+future_target = 72
+STEP = 6
+
+x_train_ss, y_train_ss = multivariate_data(
+ features, features[:, 1], 0, train_split, history, future_target, STEP, single_step=True)
+
+x_val_ss, y_val_ss = multivariate_data(features, features[:, 1], train_split, None, history,
+ future_target, STEP, single_step=True)
+
+print(x_train_ss.shape, y_train_ss.shape)
+print(x_val_ss.shape, y_val_ss.shape)
+
+"""The `tf.data.Dataset` API supports writing descriptive and efficient input pipelines. Dataset usage following a common pattern:
+- Creating a source dataset from our input data.
+- Applied dataset transformations to preprocess the data.
+- Iterate over the dataset and process the elements.
+Note: Iteration happens in a streaming fashion, so the full dataset does not need to fit into memory.
+Once we have a dataset, we can apply transformations to prepare the data for our model:
+"""
+
+# tensorflow dataset
+batch_size = 256
+buffer_size = 10000
+
+train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))
+train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))
+val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+print(train_ss)
+print(val_ss)
+
+x_train_ss.shape[-2:]
+
+
+def root_mean_squared_error(y_true, y_pred):
+ return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
+
+# Modelling using LSTM
+steps = 50
+
+EPOCHS = 20
+
+single_step_model = tf.keras.models.Sequential()
+
+single_step_model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_ss.shape[-2:])))
+# single_step_model.add(tf.keras.layers.Dropout(0.3))
+single_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+# single_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+single_step_model.add(tf.keras.layers.Activation("relu"))
+single_step_model.add(tf.keras.layers.Dense(1))
+single_step_model.compile(
+ optimizer=tf.keras.optimizers.Adam(),
+ loss='mae',
+ metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+#single_step_model.compile(loss='mse', optimizer='rmsprop')
+
+single_step_model_history = single_step_model.fit(
+ train_ss,
+ epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_ss,
+ validation_steps=50)
+
+plot_model(
+ single_step_model,
+ to_file='/content/drive/MyDrive/LFN Anuket/Analysis/data/Final/Bi-LSTM.png',
+ show_shapes=True,
+ show_layer_names=True)
+single_step_model.summary()
+
+
+# plot train test loss
+
+def plot_loss(history, title):
+ loss = history.history['loss']
+ val_loss = history.history['val_loss']
+
+ epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train Loss')
+ plt.plot(epochs, val_loss, 'r', label='Validation Loss')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# plot train test loss
+
+
+def plot_loss(history, title):
+ loss = history.history['rmse']
+ val_loss = history.history['val_rmse']
+
+ epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train RMSE')
+ plt.plot(epochs, val_loss, 'r', label='Validation RMSE')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# function to create time steps
+
+
+def create_time_steps(length):
+ return list(range(-length, 0))
+
+# function to plot time series data
+
+
+def plot_time_series(plot_data, delta, title):
+ labels = ["History", 'True Future', 'Model Predcited']
+ marker = ['.-', 'rx', 'go']
+ time_steps = create_time_steps(plot_data[0].shape[0])
+
+ if delta:
+ future = delta
+ else:
+ future = 0
+ plt.title(title)
+ for i, x in enumerate(plot_data):
+ if i:
+ plt.plot(
+ future,
+ plot_data[i],
+ marker[i],
+ markersize=10,
+ label=labels[i])
+ else:
+ plt.plot(
+ time_steps,
+ plot_data[i].flatten(),
+ marker[i],
+ label=labels[i])
+ plt.legend()
+ plt.xlim([time_steps[0], (future + 5) * 2])
+
+ plt.xlabel('Time_Step')
+ return plt
+
+# Moving window average
+
+
+def MWA(history):
+ return np.mean(history)
+
+# plot time series and predicted values
+
+
+for x, y in val_ss.take(5):
+ plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),
+ single_step_model.predict(x)[0]], 12,
+ 'Single Step Prediction')
+ plot.show()
+
+"""# **MultiStep Forcasting**"""
+
+future_target = 72 # 72 future values
+x_train_multi, y_train_multi = multivariate_data(features, features[:, 1], 0,
+ train_split, history,
+ future_target, STEP)
+x_val_multi, y_val_multi = multivariate_data(features, features[:, 1],
+ train_split, None, history,
+ future_target, STEP)
+
+print(x_train_multi.shape)
+print(y_train_multi.shape)
+
+# TF DATASET
+
+train_data_multi = tf.data.Dataset.from_tensor_slices(
+ (x_train_multi, y_train_multi))
+train_data_multi = train_data_multi.cache().shuffle(
+ buffer_size).batch(batch_size).repeat()
+
+val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
+val_data_multi = val_data_multi.batch(batch_size).repeat()
+
+print(train_data_multi)
+print(val_data_multi)
+
+# plotting function
+
+
+def multi_step_plot(history, true_future, prediction):
+ plt.figure(figsize=(12, 6))
+ num_in = create_time_steps(len(history))
+ num_out = len(true_future)
+ plt.grid()
+ plt.plot(num_in, np.array(history[:, 1]), label='History')
+ plt.plot(np.arange(num_out) / STEP, np.array(true_future), 'bo',
+ label='True Future')
+ if prediction.any():
+ plt.plot(np.arange(num_out) / STEP, np.array(prediction), 'ro',
+ label='Predicted Future')
+ plt.legend(loc='upper left')
+ plt.show()
+
+
+for x, y in train_data_multi.take(1):
+ multi_step_plot(x[0], y[0], np.array([0]))
+
+"""Bi-directional LSTM:
+On some sequence prediction problems, it can be beneficial to allow the LSTM model to learn the input sequence both forward and backwards and concatenate both interpretations. This is known as bidirectional.
+
+Here, `tf.keras.layers.Bidirectional` is a bidirectional wrapper for RNNs which inherits from `Wrapper`, `Layer`, and `module`
+"""
+
+multi_step_model = tf.keras.models.Sequential()
+multi_step_model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_multi.shape[-2:])))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+multi_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+multi_step_model.add(tf.keras.layers.Activation("relu"))
+# add dropout layer (0.3)
+multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs
+
+multi_step_model.compile(
+ optimizer=tf.keras.optimizers.RMSprop(
+ clipvalue=1.0), loss='mae', metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+
+multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_data_multi,
+ validation_steps=50)
+
+plot_loss(multi_step_history, 'Multi-Step Training and validation loss')
+
+for x, y in val_data_multi.take(5):
+ multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
+
+scores = multi_step_model.evaluate(
+ x_train_multi,
+ y_train_multi,
+ verbose=1,
+ batch_size=200)
+print('RMSE: {}'.format(scores[1]))
+
+scores_test = multi_step_model.evaluate(
+    x_val_multi, y_val_multi, verbose=1, batch_size=200)
+print('RMSE: {}'.format(scores_test[1]))
+
+y_pred_test = multi_step_model.predict(x_val_multi, verbose=0)
+
+plt.figure(figsize=(10, 5))
+plt.plot(y_pred_test, label='Predicted')
+plt.plot(y_val_multi, label='Actual')
+plt.ylabel("Value")
+plt.xlabel("Timestamp")
+plt.legend(loc='upper left')
+plt.show()
diff --git a/models/failure_prediction/python/cnn.py b/models/failure_prediction/python/cnn.py
new file mode 100644
index 0000000..3a16562
--- /dev/null
+++ b/models/failure_prediction/python/cnn.py
@@ -0,0 +1,376 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411, W0311, C0326, C0330
+# -*- coding: utf-8 -*-
+"""CNN.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/1W8WsMl3qckYG9Xa2CUiA-RU3322whQUf
+
+Contributors: **Rohit Singh Rathaur, Girish L.**
+
+Copyright [2021](2021) [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from keras import backend as K
+from keras.layers import Dense
+from keras.layers.convolutional import MaxPooling1D
+from keras.layers.convolutional import Conv1D
+from keras.layers import Flatten
+from keras.utils.vis_utils import plot_model
+import seaborn as sns
+import os
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/content/drive')
+
+# Importing libraries
+
+df_Ellis = pd.read_csv(
+ "/content/drive/MyDrive/Failure/lstm/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis
+
+df_Ellis.plot()
+
+# we show here the hist
+df_Ellis.hist(bins=100, figsize=(20, 15))
+# save_fig("attribute_histogram_plots")
+plt.show()
+
+cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']]
+cpu_system_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']]
+load_avg_1_min.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']]
+cpu_wait_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Year', fontsize=30)
+
+df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(
+ 12).mean(), cpu_wait_perc.rolling(12).mean()], axis=1)
+df_dg.plot(figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Year', fontsize=20)
+
+
+# we establish the correlation matrix
+color = sns.color_palette()
+sns.set_style('darkgrid')
+
+correlation_matrix = df_Ellis.corr()
+f, ax = plt.subplots(figsize=(20, 10))
+sns.heatmap(
+    correlation_matrix,
+ cbar=True,
+ vmin=0,
+ vmax=1,
+ square=True,
+ annot=True)
+plt.show()
+
+df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])
+
+# using multivariate feature
+
+features_3 = [
+ 'ellis-cpu.wait_perc',
+ 'ellis-load.avg_1_min',
+ 'ellis-net.in_bytes_sec',
+ 'Label']
+
+features = df_Ellis[features_3]
+features.index = df_Ellis['Timestamp']
+features.head()
+
+features.plot(subplots=True)
+
+features = features.values
+
+# standardize data
+train_split = 141600
+tf.random.set_seed(13)
+
+# standardize data
+features_mean = features[:train_split].mean()
+features_std = features[:train_split].std()
+features = (features - features_mean) / features_std
+
+print(type(features))
+print(features.shape)
+
+# create multivariate data
+
+
+def multivariate_data(features, target, start_idx, end_idx, history_size, target_size,
+                      step, single_step=False):
+ data = []
+ labels = []
+ start_idx = start_idx + history_size
+ if end_idx is None:
+ end_idx = len(features) - target_size
+ for i in range(start_idx, end_idx):
+ idxs = range(i - history_size, i, step) # using step
+ data.append(features[idxs])
+ if single_step:
+ labels.append(target[i + target_size])
+ else:
+ labels.append(target[i:i + target_size])
+
+ return np.array(data), np.array(labels)
+
+# generate multivariate data
+
+
+history = 720
+future_target = 72
+STEP = 6
+
+x_train_ss, y_train_ss = multivariate_data(features, features[:, 1], 0, train_split, history,
+ future_target, STEP, single_step=True)
+
+x_val_ss, y_val_ss = multivariate_data(features, features[:, 1], train_split, None, history,
+ future_target, STEP, single_step=True)
+
+print(x_train_ss.shape, y_train_ss.shape)
+print(x_val_ss.shape, y_val_ss.shape)
+
+# tensorflow dataset
+batch_size = 256
+buffer_size = 10000
+
+train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))
+train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))
+val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+print(train_ss)
+print(val_ss)
+
+
+def root_mean_squared_error(y_true, y_pred):
+ return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
+
+# Modelling using LSTM
+steps = 50
+
+EPOCHS = 20
+
+single_step_model = tf.keras.models.Sequential()
+
+single_step_model.add(Conv1D(filters=64, kernel_size=2, activation='relu',
+                             input_shape=x_train_ss.shape[-2:]))
+single_step_model.add(MaxPooling1D(pool_size=2))
+single_step_model.add(Flatten())
+single_step_model.add(Dense(50, activation='relu'))
+single_step_model.add(Dense(1))
+single_step_model.compile(
+ optimizer='adam', loss='mae', metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+
+
+
+# single_step_model.add(tf.keras.layers.LSTM(32, return_sequences=False, input_shape = x_train_ss.shape[-2:]))
+# single_step_model.add(tf.keras.layers.Dropout(0.3))
+# single_step_model.add(tf.keras.layers.Dense(1))
+# single_step_model.compile(optimizer = tf.keras.optimizers.Adam(), loss = 'mae',metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
+# single_step_model.compile(loss='mse', optimizer='rmsprop')
+single_step_model_history = single_step_model.fit(train_ss, epochs=EPOCHS,
+                                                  steps_per_epoch=steps, validation_data=val_ss,
+                                                  validation_steps=50)
+single_step_model.summary()
+plot_model(
+ single_step_model,
+ to_file='/content/drive/MyDrive/Failure/lstm/CNN-LSTM.png',
+ show_shapes=True,
+ show_layer_names=True)
+
+# plot train test loss
+
+def plot_loss(history, title):
+    loss = history.history['loss']
+    val_loss = history.history['val_loss']
+
+    epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train Loss')
+ plt.plot(epochs, val_loss, 'r', label='Validation Loss')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# plot train test loss
+
+def plot_loss(history, title):
+    loss = history.history['rmse']
+    val_loss = history.history['val_rmse']
+
+    epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train RMSE')
+ plt.plot(epochs, val_loss, 'r', label='Validation RMSE')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# function to create time steps
+def create_time_steps(length):
+ return list(range(-length, 0))
+
+# function to plot time series data
+
+def plot_time_series(plot_data, delta, title):
+ labels=["History", 'True Future', 'Model Predcited']
+ marker=['.-', 'rx', 'go']
+ time_steps=create_time_steps(plot_data[0].shape[0])
+
+ if delta:
+ future=delta
+ else:
+ future=0
+ plt.title(title)
+ for i, x in enumerate(plot_data):
+ if i:
+ plt.plot(future, plot_data[i], marker[i], markersize=10, label=labels[i])
+ else:
+ plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i])
+ plt.legend()
+ plt.xlim([time_steps[0], (future + 5) * 2])
+
+ plt.xlabel('Time_Step')
+ return plt
+
+# Moving window average
+
+def MWA(history):
+ return np.mean(history)
+
+# plot time series and predicted values
+
+for x, y in val_ss.take(5):
+    plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),
+ single_step_model.predict(x)[0]], 12,
+ 'Single Step Prediction')
+ plot.show()
+
+"""# **MultiStep Forcasting**"""
+
+future_target = 72  # 72 future values
+x_train_multi, y_train_multi = multivariate_data(features, features[:, 1], 0,
+                                                 train_split, history,
+                                                 future_target, STEP)
+x_val_multi, y_val_multi = multivariate_data(features, features[:, 1],
+                                             train_split, None, history,
+                                             future_target, STEP)
+
+print(x_train_multi.shape)
+print(y_train_multi.shape)
+
+# TF DATASET
+
+train_data_multi = tf.data.Dataset.from_tensor_slices(
+    (x_train_multi, y_train_multi))
+train_data_multi = train_data_multi.cache().shuffle(
+    buffer_size).batch(batch_size).repeat()
+
+val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
+val_data_multi = val_data_multi.batch(batch_size).repeat()
+
+print(train_data_multi)
+print(val_data_multi)
+
+# plotting function
+def multi_step_plot(history, true_future, prediction):
+ plt.figure(figsize=(12, 6))
+    num_in = create_time_steps(len(history))
+    num_out = len(true_future)
+ plt.grid()
+ plt.plot(num_in, np.array(history[:, 1]), label='History')
+ plt.plot(np.arange(num_out) / STEP, np.array(true_future), 'bo',
+ label='True Future')
+ if prediction.any():
+ plt.plot(np.arange(num_out) / STEP, np.array(prediction), 'ro',
+ label='Predicted Future')
+ plt.legend(loc='upper left')
+ plt.show()
+
+
+
+for x, y in train_data_multi.take(1):
+ multi_step_plot(x[0], y[0], np.array([0]))
+
+multi_step_model = tf.keras.models.Sequential()
+
+
+multi_step_model.add(Conv1D(filters=64, kernel_size=2,
+                            activation='relu', input_shape=x_train_multi.shape[-2:]))
+multi_step_model.add(MaxPooling1D(pool_size=2))
+multi_step_model.add(Flatten())
+multi_step_model.add(Dense(50, activation='relu'))
+multi_step_model.add(Dense(72))  # 72 outputs to match the 72 multi-step targets
+multi_step_model.compile(
+ optimizer='adam', loss='mae', metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+
+
+# multi_step_model.add(tf.keras.layers.LSTM(32,
+ # return_sequences=True,
+ # input_shape=x_train_multi.shape[-2:]))
+# multi_step_model.add(tf.keras.layers.LSTM(16, activation='relu'))
+# add dropout layer (0.3)
+# multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs
+
+# multi_step_model.compile(optimizer=tf.keras.optimizers.RMSprop(clipvalue=1.0),
+# loss='mae',metrics=[tf.keras.metrics.RootMeanSquaredError(name='rmse')])
+
+multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
+                                          steps_per_epoch=steps,
+                                          validation_data=val_data_multi,
+                                          validation_steps=50)
+
+plot_loss(multi_step_history, 'Multi-Step Training and validation loss')
+
+for x, y in val_data_multi.take(5):
+ multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
+
+scores = multi_step_model.evaluate(
+    x_train_multi,
+    y_train_multi,
+    verbose=1,
+    batch_size=200)
+print('RMSE: {}'.format(scores[1]))
+
+scores_test = multi_step_model.evaluate(
+    x_val_multi, y_val_multi, verbose=1, batch_size=200)
+print('RMSE: {}'.format(scores_test[1]))
diff --git a/models/failure_prediction/python/decision_tree.py b/models/failure_prediction/python/decision_tree.py
new file mode 100644
index 0000000..a88c19c
--- /dev/null
+++ b/models/failure_prediction/python/decision_tree.py
@@ -0,0 +1,199 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411, W0311, C0326, C0330, W0106, C0412
+# -*- coding: utf-8 -*-
+"""Decision_Tree.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/1TdQCHMWu8lPA53-jFhxXDUPQdjqufrL1
+
+Contributors: **Rohit Singh Rathaur, Girish L.**
+
+Copyright [2021](2021) [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+We mounted the drive to access the data
+"""
+
+import sklearn.metrics as metrics
+from sklearn.metrics import classification_report
+import seaborn as sns
+from sklearn import tree
+from sklearn.linear_model import LogisticRegression
+from sklearn.model_selection import train_test_split
+import os
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/content/drive')
+
+"""We are importing libraries to read the CSV and to train the models"""
+
+# Importing libraries
+
+"""We are reading CSV file using `read_csv` function and dropping the `Timestamp` column and storing it in a DataFrame called `df_Ellis`."""
+
+df_Ellis = pd.read_csv(
+ "/content/drive/MyDrive/Failure/lstm/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis = df_Ellis.drop(columns='Timestamp')
+df_Ellis
+
+"""First we stored the `feature_cols` and defined the `X` matrix and `y` vector where `X` is a matrix and containing all the feature matrix and `y` is a vector which is having target value."""
+
+# define X and y
+feature_cols = [
+ 'ellis-cpu.wait_perc',
+ 'ellis-load.avg_1_min',
+ 'ellis-net.in_bytes_sec',
+ 'ellis-cpu.system_perc',
+ 'ellis-mem.free_mb']
+
+# X is a matrix, hence we use [] to access the features we want in feature_cols
+X = df_Ellis[feature_cols]
+
+# y is a vector, hence we use dot to access 'Label'
+y = df_Ellis.Label
+
+"""We splitted `X` and `y` into `X_train`, `X_test`, `y_train`, and `y_test` using `train_test_split` function."""
+
+# split X and y into training and testing sets
+X_train, X_test, y_train, y_test = train_test_split(
+ X, y, test_size=0.30, random_state=5)
+
+"""We are training the model with Decision Tree."""
+
+# train a logistic regression model on the training set
+
+# instantiate model
+logreg = tree.DecisionTreeClassifier()
+
+# fit model
+logreg.fit(X_train, y_train)
+
+"""We are making predictions for test set"""
+
+# make class predictions for the testing set
+y_pred_class = dtree.predict(X_test)
+
+"""Here, we are calculating the accuracy using `sklearn` library"""
+
+# calculate accuracy
+print(metrics.accuracy_score(y_test, y_pred_class))
+
+"""We are examining the class distribution of the testing set using a `pandas` series method"""
+
+# examine the class distribution of the testing set (using a Pandas Series
+# method)
+y_test.value_counts()
+
+"""We counted the value for each lables"""
+
+y_train.value_counts()
+
+"""We are calculating the percentage of ones because `y_test` only contains ones and zeroes, we can simply calculate the mean = percentage of ones"""
+
+# calculate the percentage of ones
+# because y_test only contains ones and zeros, we can simply calculate the
+# mean = percentage of ones
+y_test.mean()
+
+"""We are calculating the percentage of zeros"""
+
+# calculate the percentage of zeros
+1 - y_test.mean()
+
+# calculate null accuracy in a single line of code
+# only for binary classification problems coded as 0/1
+max(y_test.mean(), 1 - y_test.mean())
+
+# calculate null accuracy (for multi-class classification problems)
+y_test.value_counts().head(1) / len(y_test)
+
+# print the first 50 true and predicted responses
+print('True:', y_test.values[0:50])
+print('Pred:', y_pred_class[0:50])
+
+# IMPORTANT: first argument is true values, second argument is predicted values
+# this produces a 2x2 numpy array (matrix)
+print(metrics.confusion_matrix(y_test, y_pred_class))
+
+# save confusion matrix and slice into four pieces
+confusion = metrics.confusion_matrix(y_test, y_pred_class)
+print(confusion)
+#[row, column]
+TP = confusion[1, 1]
+TN = confusion[0, 0]
+FP = confusion[0, 1]
+FN = confusion[1, 0]
+
+# use float to perform true division, not integer division
+print((TP + TN) / float(TP + TN + FP + FN))
+print(metrics.accuracy_score(y_test, y_pred_class))
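+
+"""From the same four confusion-matrix slices we can derive further metrics by
+hand; a short sketch of the arithmetic (our addition), assuming the TP, TN, FP
+and FN variables defined above:"""
+
+recall = TP / float(TP + FN)        # true positive rate (sensitivity)
+specificity = TN / float(TN + FP)   # true negative rate
+precision = TP / float(TP + FP)
+print('Recall: {}, Specificity: {}, Precision: {}'.format(
+    recall, specificity, precision))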
+
+"""We are defining a function `print_results` to print the result of `y_test` and `y_pred`."""
+
+
+def print_results(y_test, y_pred):
+
+ # f1-score
+ f1 = metrics.f1_score(y_test, y_pred)
+ print("F1 Score: ", f1)
+ print(classification_report(y_test, y_pred))
+
+ conf_matrix = metrics.confusion_matrix(y_test, y_pred)
+ plt.figure(figsize=(12, 12))
+ plt.subplot(221)
+ sns.heatmap(conf_matrix, fmt="d", annot=True, cmap='Blues')
+ b, t = plt.ylim()
+ plt.ylim(b + 0.5, t - 0.5)
+    plt.title('Confusion Matrix')
+ plt.ylabel('True Values')
+ plt.xlabel('Predicted Values')
+
+ # roc_auc_score
+ model_roc_auc = metrics.roc_auc_score(y_test, y_pred)
+ print("Area under curve : ", model_roc_auc, "\n")
+ fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred)
+ gmeans = np.sqrt(tpr * (1 - fpr))
+ ix = np.argmax(gmeans)
+ threshold = np.round(thresholds[ix], 3)
+
+ plt.subplot(222)
+ plt.plot(
+ fpr,
+ tpr,
+ color='darkorange',
+ lw=1,
+ label="Auc : %.3f" %
+ model_roc_auc)
+ plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
+ plt.scatter(
+ fpr[ix],
+ tpr[ix],
+ marker='o',
+ color='black',
+ label='Best Threshold:' +
+ str(threshold))
+ plt.xlim([0.0, 1.0])
+ plt.ylim([0.0, 1.05])
+ plt.xlabel('False Positive Rate')
+ plt.ylabel('True Positive Rate')
+ plt.title('Receiver operating characteristic')
+ plt.legend(loc="lower right")
+
+
+print_results(y_test, y_pred_class)
diff --git a/models/failure_prediction/python/featurecreation.py b/models/failure_prediction/python/featurecreation.py
new file mode 100644
index 0000000..7ed5cf3
--- /dev/null
+++ b/models/failure_prediction/python/featurecreation.py
@@ -0,0 +1,114 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411, W0311, C0326, C0330, W0106, C0412
+# -*- coding: utf-8 -*-
+"""FeatureCreation.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/1UQzgn71tYU7WHgr-CL1CRNM9q9Ajr2Kx
+
+Contributors: **Rohit Singh Rathaur, Girish L.**
+
+Copyright [2021](2021) [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Commented out IPython magic to ensure Python compatibility.
+# Import libraries use for visualization and analysis
+import pandas as pd
+import numpy as np
+
+# %matplotlib inline
+import matplotlib
+import matplotlib.pyplot as plt
+
+from pandas import Series, DataFrame
+import seaborn as sns
+from sklearn.preprocessing import scale
+from sklearn.decomposition import PCA
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from scipy import stats
+from IPython.display import display, HTML
+
+from google.colab import drive
+drive.mount('/gdrive')
+
+"""# **Loading the Data**"""
+
+df_Ellis = pd.read_csv(
+ "/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/Final.csv")
+#df_Bono = pd.read_csv("/gdrive/MyDrive/LFN Anuket/Analysis/data/matrices/df_Bono.csv", error_bad_lines=False)
+#df_Sprout = pd.read_csv("/gdrive/MyDrive/LFN Anuket/Analysis/data/matrices/df_Sprout.csv", error_bad_lines=False)
+#df_Homer = pd.read_csv("/gdrive/MyDrive/LFN Anuket/Analysis/data/matrices/df_Homer.csv", error_bad_lines=False)
+#df_Homestead = pd.read_csv("/gdrive/MyDrive/LFN Anuket/Analysis/data/matrices/df_Homestead.csv", error_bad_lines=False)
+#df_Ralf = pd.read_csv("/gdrive/MyDrive/LFN Anuket/Analysis/data/matrices/df_Ralf.csv", error_bad_lines=False)
+
+df_Ellis.head()
+
+df_Ellis.describe()
+
+#df_Ellis['SLO1'] = 0
+#print('Column names are: ',list(df_Ellis.columns))
+
+df4 = df_Ellis["ellis-load.avg_1_min"] > 2.45
+df4
+df4.to_csv(
+ '/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/EllisLoadAvgLabel_lessthan0198.csv')
+df4.head(50)
+
+df3 = df_Ellis["ellis-cpu.wait_perc"] > 5
+df3
+df3.to_csv('/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/ellis-cpu>5.csv')
+df3.head(50)
+
+df5 = df_Ellis["ellis-net.out_packets_sec"] > 1000
+df5
+df5.to_csv(
+ '/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/ellis-net.in_bytes_sec21139.csv')
+df5.head(50)
+
+# We are applying Logical OR Operator between df4 and df3
+df6 = (df4[0:176999]) | (df3[0:176999])
+df6.head(50)
+
+df6.to_csv('/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/OR_TwoCondition(2).csv')
+df6.head(50)
+
+df7 = (df6[0:176999]) | (df5[0:176999])
+df7.head(50)
+
+df7.to_csv('/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/FinalORLabel8.5.csv')
+df7.head(50)
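+
+"""An equivalent vectorized construction (our sketch, not the notebook's
+original flow): combine the three threshold masks in a single expression. It
+is assigned to a separate variable so the steps above are left untouched."""
+
+label_alt = ((df_Ellis["ellis-load.avg_1_min"] > 2.45)
+             | (df_Ellis["ellis-cpu.wait_perc"] > 5)
+             | (df_Ellis["ellis-net.out_packets_sec"] > 1000))
+print(label_alt.value_counts())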
+
+df_Ellis.insert(7, "Label", df7)
+
+#df_Ellis.insert (8, "Label", df7)
+
+# We applied the logical OR operator to the two feature masks df3 and df4 and
+# stored the result in df6, which serves as the final label after applying
+# the OR condition
+df_Ellis
+df_Ellis.to_csv(
+ '/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/Ellis_FinalTwoConditionwithOR.csv')
+
+df_Ellis.head(100)
+
+# pandas count distinct values in column
+df_Ellis['Label'].value_counts()
+
+#final.to_csv('/gdrive/MyDrive/LFN Anuket/Analysis/data/New/FinalLabel.csv')
+
+#df_Ellis.loc[(df_Ellis["ellis-cpu.wait_perc"] > 5) & (df_Ellis["ellis-load.avg_1_min"] > 2)]
+
+"""# **Creating New Features**"""
diff --git a/models/failure_prediction/python/lstm.py b/models/failure_prediction/python/lstm.py
new file mode 100644
index 0000000..30845f1
--- /dev/null
+++ b/models/failure_prediction/python/lstm.py
@@ -0,0 +1,391 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411, W0311, W0404, E0602, C0326, C0330, W0106, C0412
+# -*- coding: utf-8 -*-
+"""LSTM.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/15natzoGkWnOqxZyzavAaRqBFrPNxzd35
+
+Contributors: Rohit Singh Rathaur, Girish L.
+
+Copyright 2021 [Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka]
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+
+We mounted the drive to access the data from google drive
+"""
+
+from keras import backend as K
+from keras.utils.vis_utils import plot_model
+from keras.layers import Activation, Dense, Dropout
+import os
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/content/drive')
+
+"""We are importing the libraries:
+- TensorFlow: to process and train the model
+- Matplotlib: to plot the training anf loss curves
+- Pandas: used for data analysis and it allows us to import data from various formats
+- Numpy: For array computing
+"""
+
+# Importing libraries
+
+"""We are reading the CSV file using `read_csv` function and storing it in a DataFrame named `df_Ellis`"""
+
+df_Ellis = pd.read_csv(
+ "/content/drive/MyDrive/Failure/lstm/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis
+
+"""`plot()` function is used to draw points"""
+
+df_Ellis.plot()
+
+"""Using multivariate features:
+- Storing only the multivariate features in a dataframe named `features_3`
+- Extracting the Timestamp column from `df_Ellis` dataframe
+- and combining it with the dataframe `features`
+"""
+
+# using multivariate feature
+
+features_3 = [
+ 'ellis-cpu.system_perc',
+ 'ellis-cpu.wait_perc',
+ 'ellis-load.avg_1_min',
+ 'ellis-mem.free_mb',
+ 'ellis-net.in_bytes_sec',
+ 'ellis-net.out_packets_sec',
+ 'Label']
+
+features = df_Ellis[features_3]
+features.index = df_Ellis['Timestamp']
+features.head()
+
+"""Plotted features"""
+
+features.plot(subplots=True)
+
+features = features.values
+
+# standardize data
+train_split = 141600
+tf.random.set_seed(13)
+
+# standardize data
+features_mean = features[:train_split].mean()
+features_std = features[:train_split].std()
+features = (features - features_mean) / features_std
+
+print(type(features))
+print(features.shape)
+
+# create multivariate data
+
+
+def multivariate_data(
+ features,
+ target,
+ start_idx,
+ end_idx,
+ history_size,
+ target_size,
+ step,
+ single_step=False):
+ data = []
+ labels = []
+ start_idx = start_idx + history_size
+ if end_idx is None:
+ end_idx = len(features) - target_size
+ for i in range(start_idx, end_idx):
+ idxs = range(i - history_size, i, step) # using step
+ data.append(features[idxs])
+ if single_step:
+ labels.append(target[i + target_size])
+ else:
+ labels.append(target[i:i + target_size])
+
+ return np.array(data), np.array(labels)
+
+
+"""We spliited the multivariate data in tarining and validation and printed the shape of that data."""
+
+# generate multivariate data
+
+history = 720
+future_target = 72
+STEP = 6
+
+x_train_ss, y_train_ss = multivariate_data(
+ features, features[:, 1], 0, train_split, history, future_target, STEP, single_step=True)
+
+x_val_ss, y_val_ss = multivariate_data(features, features[:, 1], train_split, None, history,
+ future_target, STEP, single_step=True)
+
+print(x_train_ss.shape, y_train_ss.shape)
+print(x_val_ss.shape, y_val_ss.shape)
+
+"""The `tf.data.Dataset` API supports writing descriptive and efficient input pipelines. Dataset usage following a common pattern:
+- Creating a source dataset from our input data.
+- Applied dataset transformations to preprocess the data.
+- Iterate over the dataset and process the elements.
+Note: Iteration happens in a streaming fashion, so the full dataset does not need to fit into memory.
+Once we have a dataset, we can apply transformations to prepare the data for our model:
+"""
+
+# tensorflow dataset
+batch_size = 256
+buffer_size = 10000
+
+train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))
+train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))
+val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+print(train_ss)
+print(val_ss)
+
+"""We used a custom loss function to evaluate the model:"""
+
+
+def root_mean_squared_error(y_true, y_pred):
+ return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
+
+"""We are building a single step LSTM model for training data with dropout 0.3 and we used ADAM optimizers."""
+
+# Modelling using LSTM
+steps = 50
+
+EPOCHS = 20
+
+single_step_model = tf.keras.models.Sequential()
+
+single_step_model.add(tf.keras.layers.LSTM(
+ 32, return_sequences=False, input_shape=x_train_ss.shape[-2:]))
+single_step_model.add(tf.keras.layers.Dropout(0.3))
+single_step_model.add(tf.keras.layers.Dense(1))
+single_step_model.compile(
+ optimizer=tf.keras.optimizers.Adam(),
+ loss='mae',
+ metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+#single_step_model.compile(loss='mse', optimizer='rmsprop')
+single_step_model_history = single_step_model.fit(
+ train_ss,
+ epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_ss,
+ validation_steps=50)
+single_step_model.summary()
+plot_model(
+ single_step_model,
+ to_file='/content/drive/MyDrive/Failure/lstm/LSTM.png',
+ show_shapes=True,
+ show_layer_names=True)
+
+"""We defined the `plot_loss` function to plot the train and test loss"""
+
+# plot train test loss
+
+
+def plot_loss(history, title):
+ loss = history.history['loss']
+ val_loss = history.history['val_loss']
+
+ epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train Loss')
+ plt.plot(epochs, val_loss, 'r', label='Validation Loss')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# plot train/validation RMSE (logged under the 'rmse' metric name)
+
+
+def plot_rmse(history, title):
+    loss = history.history['rmse']
+    val_loss = history.history['val_rmse']
+
+    epochs = range(len(loss))
+    plt.figure()
+    plt.plot(epochs, loss, 'b', label='Train RMSE')
+    plt.plot(epochs, val_loss, 'r', label='Validation RMSE')
+    plt.title(title)
+    plt.legend()
+    plt.grid()
+    plt.show()
+
+
+plot_rmse(single_step_model_history,
+          'Single Step Training and validation RMSE')
+
+"""We defined a function `create_time_steps` to create time steps and function `plot_time_series` to plot the time series data"""
+
+# function to create time steps
+
+
+def create_time_steps(length):
+ return list(range(-length, 0))
+
+# function to plot time series data
+
+
+def plot_time_series(plot_data, delta, title):
+    labels = ['History', 'True Future', 'Model Predicted']
+ marker = ['.-', 'rx', 'go']
+ time_steps = create_time_steps(plot_data[0].shape[0])
+
+ if delta:
+ future = delta
+ else:
+ future = 0
+ plt.title(title)
+ for i, x in enumerate(plot_data):
+ if i:
+ plt.plot(
+ future,
+ plot_data[i],
+ marker[i],
+ markersize=10,
+ label=labels[i])
+ else:
+ plt.plot(
+ time_steps,
+ plot_data[i].flatten(),
+ marker[i],
+ label=labels[i])
+ plt.legend()
+ plt.xlim([time_steps[0], (future + 5) * 2])
+
+ plt.xlabel('Time_Step')
+ return plt
+
+# Moving window average
+
+
+def MWA(history):
+ return np.mean(history)
+
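+"""`MWA` is a naive moving-window-average baseline: it predicts the mean of the
+history window. An illustrative comparison on one validation batch (a sketch,
+not part of the original evaluation):
+"""
+# for x, y in val_ss.take(1):
+#     print('MWA baseline:', MWA(x[0][:, 1].numpy()),
+#           'true future:', y[0].numpy())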
+
+"""We plotted the time series and predicted values"""
+
+# plot time series and predicted values
+
+for x, y in val_ss.take(5):
+ plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),
+ single_step_model.predict(x)[0]], 12,
+ 'Single Step Prediction')
+ plot.show()
+
+"""# **MultiStep Forcasting**
+
+We splitted the data in the form of training and validation for multistep forcasting:
+"""
+
+future_target = 72 # 72 future values
+x_train_multi, y_train_multi = mutlivariate_data(features, features[:, 1], 0,
+ train_split, history,
+ future_target, STEP)
+x_val_multi, y_val_multi = mutlivariate_data(features, features[:, 1],
+ train_split, None, history,
+ future_target, STEP)
+
+print(x_train_multi.shape)
+print(y_train_multi.shape)
+
+"""The `tf.data.Dataset` API supports writing descriptive and efficient input pipelines. Dataset usage following a common pattern:
+- Creating a source dataset from our input data.
+- Applied dataset transformations to preprocess the data.
+- Iterate over the dataset and process the elements.
+Note: Iteration happens in a streaming fashion, so the full dataset does not need to fit into memory.
+Once we have a dataset, we can apply transformations to prepare the data for our model:
+"""
+
+# TF DATASET
+
+train_data_multi = tf.data.Dataset.from_tensor_slices(
+ (x_train_multi, y_train_multi))
+train_data_multi = train_data_multi.cache().shuffle(
+ buffer_size).batch(batch_size).repeat()
+
+val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
+val_data_multi = val_data_multi.batch(batch_size).repeat()
+
+print(train_data_multi)
+print(val_data_multi)
+
+"""We created a `multi_step_plot` function to plot between `history` and `true_future` data"""
+
+# plotting function
+
+
+def multi_step_plot(history, true_future, prediction):
+ plt.figure(figsize=(12, 6))
+ num_in = create_time_steps(len(history))
+ num_out = len(true_future)
+ plt.grid()
+ plt.plot(num_in, np.array(history[:, 1]), label='History')
+ plt.plot(np.arange(num_out) / STEP, np.array(true_future), 'bo',
+ label='True Future')
+ if prediction.any():
+ plt.plot(np.arange(num_out) / STEP, np.array(prediction), 'ro',
+ label='Predicted Future')
+ plt.legend(loc='upper left')
+ plt.show()
+
+
+for x, y in train_data_multi.take(1):
+ multi_step_plot(x[0], y[0], np.array([0]))
+
+"""We are building a single step LSTM model for training data with dropout 0.3 and we used ADAM optimizers."""
+
+multi_step_model = tf.keras.models.Sequential()
+multi_step_model.add(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_multi.shape[-2:]))
+multi_step_model.add(tf.keras.layers.LSTM(16, activation='relu'))
+# add dropout layer (0.3)
+multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs
+
+multi_step_model.compile(
+ optimizer=tf.keras.optimizers.RMSprop(
+ clipvalue=1.0), loss='mae', metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+
+multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_data_multi,
+ validation_steps=50)
+
+plot_loss(multi_step_history, 'Multi-Step Training and validation loss')
+
+for x, y in val_data_multi.take(5):
+ multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
+
+scores = multi_step_model.evaluate(
+    x_train_multi,
+    y_train_multi,
+    verbose=1,
+    batch_size=200)
+print('Train MAE: {}, RMSE: {}'.format(scores[0], scores[1]))
+
+scores_test = multi_step_model.evaluate(
+    x_val_multi, y_val_multi, verbose=1, batch_size=200)
+print('Validation MAE: {}, RMSE: {}'.format(scores_test[0], scores_test[1]))
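+
+"""The MAE/RMSE above are in standardized units, because the targets were
+scaled by `features_std`. A hedged sketch of converting the validation MAE back
+to the original units of the target column (index 1):
+"""
+# print('Validation MAE (original units):', scores_test[0] * features_std[1])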
diff --git a/models/failure_prediction/python/lstm_correlation.py b/models/failure_prediction/python/lstm_correlation.py
new file mode 100644
index 0000000..54d4040
--- /dev/null
+++ b/models/failure_prediction/python/lstm_correlation.py
@@ -0,0 +1,379 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411, W0311, W0404, E0602, C0326, C0330, W0106, C0412
+# -*- coding: utf-8 -*-
+"""LSTM_correlation.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/1pDIYGV2-FR7QJEhCt9HxlJfeIeqw8xBj
+
+Contributors: Rohit Singh Rathaur, Girish L.
+
+Copyright 2021 [Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka]
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
+"""
+
+import os
+from keras import backend as K
+from keras.layers import Activation, Dense, Dropout
+import seaborn as sns
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/gdrive')
+
+"""We are importing the libraries:
+
+- TensorFlow: to process and train the model
+- Matplotlib: to plot the training anf loss curves
+- Pandas: used for data analysis and it allows us to import data from various formats
+- Numpy: For array computing
+"""
+
+# Importing libraries
+
+"""We are reading the CSV file using `read_csv` function and storing it in a DataFrame named `df_Ellis`"""
+
+df_Ellis = pd.read_csv(
+ "/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis
+
+df_Ellis.plot()
+
+# we show here the hist
+df_Ellis.hist(bins=100, figsize=(20, 15))
+# save_fig("attribute_histogram_plots")
+plt.show()
+
+cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']]
+cpu_system_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']]
+load_avg_1_min.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']]
+cpu_wait_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(
+ 12).mean(), cpu_wait_perc.rolling(12).mean()], axis=1)
+df_dg.plot(figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=20)
+
+# we compute the correlation matrix
+color = sns.color_palette()
+sns.set_style('darkgrid')
+
+correlation_matrix = df_Ellis.corr()
+f, ax = plt.subplots(figsize=(20, 10))
+sns.heatmap(
+    correlation_matrix,
+    cbar=True,
+    vmin=0,
+    vmax=1,
+    square=True,
+    annot=True)
+plt.show()
+
+df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])
+
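+"""The correlation matrix and `corrwith` output above motivate the feature
+choice below: the columns most correlated with `ellis-load.avg_1_min` are kept.
+A hedged sketch of making the same selection programmatically (the 0.2
+threshold is an assumption, not taken from the original analysis):
+"""
+# corr = df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min']).abs()
+# print(corr[corr > 0.2].sort_values(ascending=False))
+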
+# using multivariate features
+
+features_3 = [
+ 'ellis-cpu.wait_perc',
+ 'ellis-load.avg_1_min',
+ 'ellis-net.in_bytes_sec',
+ 'Label']
+
+features = df_Ellis[features_3]
+features.index = df_Ellis['Timestamp']
+features.head()
+
+features.plot(subplots=True)
+
+features = features.values
+
+# train/validation split point and random seed
+train_split = 141600
+tf.random.set_seed(13)
+
+# standardize data column-wise, using statistics from the training slice only
+features_mean = features[:train_split].mean(axis=0)
+features_std = features[:train_split].std(axis=0)
+features = (features - features_mean) / features_std
+
+print(type(features))
+print(features.shape)
+
+# create multivariate data
+
+
+def mutlivariate_data(
+ features,
+ target,
+ start_idx,
+ end_idx,
+ history_size,
+ target_size,
+ step,
+ single_step=False):
+ data = []
+ labels = []
+ start_idx = start_idx + history_size
+ if end_idx is None:
+ end_idx = len(features) - target_size
+ for i in range(start_idx, end_idx):
+ idxs = range(i - history_size, i, step) # using step
+ data.append(features[idxs])
+ if single_step:
+ labels.append(target[i + target_size])
+ else:
+ labels.append(target[i:i + target_size])
+
+ return np.array(data), np.array(labels)
+
+# generate multivariate data
+
+
+history = 720
+future_target = 72
+STEP = 6
+
+x_train_ss, y_train_ss = mutlivariate_data(
+ features, features[:, 1], 0, train_split, history, future_target, STEP, single_step=True)
+
+x_val_ss, y_val_ss = mutlivariate_data(features, features[:, 1], train_split, None, history,
+ future_target, STEP, single_step=True)
+
+print(x_train_ss.shape, y_train_ss.shape)
+print(x_val_ss.shape, y_val_ss.shape)
+
+# tensorflow dataset
+batch_size = 256
+buffer_size = 10000
+
+train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))
+train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))
+val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+print(train_ss)
+print(val_ss)
+
+
+def root_mean_squared_error(y_true, y_pred):
+ return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
+
+# Modelling using LSTM
+steps = 50
+
+EPOCHS = 20
+
+single_step_model = tf.keras.models.Sequential()
+
+single_step_model.add(tf.keras.layers.LSTM(
+ 32, return_sequences=False, input_shape=x_train_ss.shape[-2:]))
+single_step_model.add(tf.keras.layers.Dropout(0.3))
+single_step_model.add(tf.keras.layers.Dense(1))
+single_step_model.compile(
+ optimizer=tf.keras.optimizers.Adam(),
+ loss='mae',
+ metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+#single_step_model.compile(loss='mse', optimizer='rmsprop')
+single_step_model_history = single_step_model.fit(
+ train_ss,
+ epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_ss,
+ validation_steps=50)
+single_step_model.summary()
+
+# plot train test loss
+
+
+def plot_loss(history, title):
+ loss = history.history['loss']
+ val_loss = history.history['val_loss']
+
+ epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train Loss')
+ plt.plot(epochs, val_loss, 'r', label='Validation Loss')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# plot train/validation RMSE (logged under the 'rmse' metric name)
+
+
+def plot_rmse(history, title):
+    loss = history.history['rmse']
+    val_loss = history.history['val_rmse']
+
+    epochs = range(len(loss))
+    plt.figure()
+    plt.plot(epochs, loss, 'b', label='Train RMSE')
+    plt.plot(epochs, val_loss, 'r', label='Validation RMSE')
+    plt.title(title)
+    plt.legend()
+    plt.grid()
+    plt.show()
+
+
+plot_rmse(single_step_model_history,
+          'Single Step Training and validation RMSE')
+
+# function to create time steps
+
+
+def create_time_steps(length):
+ return list(range(-length, 0))
+
+# function to plot time series data
+
+
+def plot_time_series(plot_data, delta, title):
+    labels = ['History', 'True Future', 'Model Predicted']
+ marker = ['.-', 'rx', 'go']
+ time_steps = create_time_steps(plot_data[0].shape[0])
+
+ if delta:
+ future = delta
+ else:
+ future = 0
+ plt.title(title)
+ for i, x in enumerate(plot_data):
+ if i:
+ plt.plot(
+ future,
+ plot_data[i],
+ marker[i],
+ markersize=10,
+ label=labels[i])
+ else:
+ plt.plot(
+ time_steps,
+ plot_data[i].flatten(),
+ marker[i],
+ label=labels[i])
+ plt.legend()
+ plt.xlim([time_steps[0], (future + 5) * 2])
+
+ plt.xlabel('Time_Step')
+ return plt
+
+# Moving window average
+
+
+def MWA(history):
+ return np.mean(history)
+
+# plot time series and predicted values
+
+
+for x, y in val_ss.take(5):
+ plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),
+ single_step_model.predict(x)[0]], 12,
+ 'Single Step Prediction')
+ plot.show()
+
+"""# **MultiStep Forcasting**"""
+
+future_target = 72 # 72 future values
+x_train_multi, y_train_multi = mutlivariate_data(features, features[:, 1], 0,
+ train_split, history,
+ future_target, STEP)
+x_val_multi, y_val_multi = mutlivariate_data(features, features[:, 1],
+ train_split, None, history,
+ future_target, STEP)
+
+print(x_train_multi.shape)
+print(y_train_multi.shape)
+
+# TF DATASET
+
+train_data_multi = tf.data.Dataset.from_tensor_slices(
+ (x_train_multi, y_train_multi))
+train_data_multi = train_data_multi.cache().shuffle(
+ buffer_size).batch(batch_size).repeat()
+
+val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
+val_data_multi = val_data_multi.batch(batch_size).repeat()
+
+print(train_data_multi)
+print(val_data_multi)
+
+# plotting function
+
+
+def multi_step_plot(history, true_future, prediction):
+ plt.figure(figsize=(12, 6))
+ num_in = create_time_steps(len(history))
+ num_out = len(true_future)
+ plt.grid()
+ plt.plot(num_in, np.array(history[:, 1]), label='History')
+ plt.plot(np.arange(num_out) / STEP, np.array(true_future), 'bo',
+ label='True Future')
+ if prediction.any():
+ plt.plot(np.arange(num_out) / STEP, np.array(prediction), 'ro',
+ label='Predicted Future')
+ plt.legend(loc='upper left')
+ plt.show()
+
+
+for x, y in train_data_multi.take(1):
+ multi_step_plot(x[0], y[0], np.array([0]))
+
+multi_step_model = tf.keras.models.Sequential()
+multi_step_model.add(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_multi.shape[-2:]))
+multi_step_model.add(tf.keras.layers.LSTM(16, activation='relu'))
+# add dropout layer (0.3)
+multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs
+
+multi_step_model.compile(
+ optimizer=tf.keras.optimizers.RMSprop(
+ clipvalue=1.0), loss='mae', metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+
+multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_data_multi,
+ validation_steps=50)
+
+plot_loss(multi_step_history, 'Multi-Step Training and validation loss')
+
+for x, y in val_data_multi.take(5):
+ multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
+
+scores = multi_step_model.evaluate(
+    x_train_multi,
+    y_train_multi,
+    verbose=1,
+    batch_size=200)
+print('Train MAE: {}, RMSE: {}'.format(scores[0], scores[1]))
+
+scores_test = multi_step_model.evaluate(
+    x_val_multi, y_val_multi, verbose=1, batch_size=200)
+print('Validation MAE: {}, RMSE: {}'.format(scores_test[0], scores_test[1]))
diff --git a/models/failure_prediction/python/stacked_lstm_correlation.py b/models/failure_prediction/python/stacked_lstm_correlation.py
new file mode 100644
index 0000000..e5ffd78
--- /dev/null
+++ b/models/failure_prediction/python/stacked_lstm_correlation.py
@@ -0,0 +1,405 @@
+# pylint: disable=C0103, C0116, W0621, E0401, W0104, W0105, R0913, E1136, W0612, E0102, C0301, W0611, C0411, W0311, W0404, E0602, C0326, C0330, W0106, C0412
+# -*- coding: utf-8 -*-
+"""stacked_LSTM_Correlation.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+ https://colab.research.google.com/drive/1x8vGD105bcSgNTyC2sx0C3ixUsVPvDQ4
+
+Contributors: **Rohit Singh Rathaur, Girish L.**
+
+Copyright [2021](2021) [*Rohit Singh Rathaur, BIT Mesra and Girish L., CIT GUBBI, Karnataka*]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from keras import backend as K
+from keras.layers import Activation, Dense, Dropout
+import seaborn as sns
+import os
+import numpy as np
+import pandas as pd
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import tensorflow as tf
+from google.colab import drive
+drive.mount('/gdrive')
+
+# Importing libraries
+
+df_Ellis = pd.read_csv(
+ "/gdrive/MyDrive/LFN Anuket/Analysis/data/Final/Ellis_FinalTwoConditionwithOR.csv")
+df_Ellis
+
+df_Ellis.plot()
+
+# we show here the hist
+df_Ellis.hist(bins=100, figsize=(20, 15))
+# save_fig("attribute_histogram_plots")
+plt.show()
+
+cpu_system_perc = df_Ellis[['ellis-cpu.system_perc']]
+cpu_system_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+load_avg_1_min = df_Ellis[['ellis-load.avg_1_min']]
+load_avg_1_min.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+cpu_wait_perc = df_Ellis[['ellis-cpu.wait_perc']]
+cpu_wait_perc.rolling(12).mean().plot(
+ figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=30)
+
+df_dg = pd.concat([cpu_system_perc.rolling(12).mean(), load_avg_1_min.rolling(
+ 12).mean(), cpu_wait_perc.rolling(12).mean()], axis=1)
+df_dg.plot(figsize=(20, 10), linewidth=5, fontsize=20)
+plt.xlabel('Timestamp', fontsize=20)
+
+
+# we compute the correlation matrix
+color = sns.color_palette()
+sns.set_style('darkgrid')
+
+correlation_matrix = df_Ellis.corr()
+f, ax = plt.subplots(figsize=(20, 10))
+sns.heatmap(
+    correlation_matrix,
+    cbar=True,
+    vmin=0,
+    vmax=1,
+    square=True,
+    annot=True)
+plt.show()
+
+df_Ellis.corrwith(df_Ellis['ellis-load.avg_1_min'])
+
+# using multivariate features
+
+features_3 = [
+ 'ellis-cpu.wait_perc',
+ 'ellis-load.avg_1_min',
+ 'ellis-net.in_bytes_sec',
+ 'Label']
+
+features = df_Ellis[features_3]
+features.index = df_Ellis['Timestamp']
+features.head()
+
+features.plot(subplots=True)
+
+features = features.values
+
+# train/validation split point and random seed
+train_split = 141600
+tf.random.set_seed(13)
+
+# standardize data column-wise, using statistics from the training slice only
+features_mean = features[:train_split].mean(axis=0)
+features_std = features[:train_split].std(axis=0)
+features = (features - features_mean) / features_std
+
+print(type(features))
+print(features.shape)
+
+# create multivariate data
+
+
+def mutlivariate_data(
+ features,
+ target,
+ start_idx,
+ end_idx,
+ history_size,
+ target_size,
+ step,
+ single_step=False):
+ data = []
+ labels = []
+ start_idx = start_idx + history_size
+ if end_idx is None:
+ end_idx = len(features) - target_size
+ for i in range(start_idx, end_idx):
+ idxs = range(i - history_size, i, step) # using step
+ data.append(features[idxs])
+ if single_step:
+ labels.append(target[i + target_size])
+ else:
+ labels.append(target[i:i + target_size])
+
+ return np.array(data), np.array(labels)
+
+# generate multivariate data
+
+
+history = 720
+future_target = 72
+STEP = 6
+
+x_train_ss, y_train_ss = mutlivariate_data(
+ features, features[:, 1], 0, train_split, history, future_target, STEP, single_step=True)
+
+x_val_ss, y_val_ss = mutlivariate_data(features, features[:, 1], train_split, None, history,
+ future_target, STEP, single_step=True)
+
+print(x_train_ss.shape, y_train_ss.shape)
+print(x_val_ss.shape, y_val_ss.shape)
+
+# tensorflow dataset
+batch_size = 256
+buffer_size = 10000
+
+train_ss = tf.data.Dataset.from_tensor_slices((x_train_ss, y_train_ss))
+train_ss = train_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+val_ss = tf.data.Dataset.from_tensor_slices((x_val_ss, y_val_ss))
+val_ss = val_ss.cache().shuffle(buffer_size).batch(batch_size).repeat()
+
+print(train_ss)
+print(val_ss)
+
+
+def root_mean_squared_error(y_true, y_pred):
+ return K.sqrt(K.mean(K.square(y_pred - y_true)))
+
+
+"""## Why Increase Depth?
+Stacking LSTM hidden layers makes the model deeper, more accurately earning the description as a deep learning technique. It is the depth of neural networks that is generally attributed to the success of the approach on a wide range of challenging prediction problems.
+
+As Stacked LSTMs are now a stable technique for challenging sequence prediction problems. A Stacked LSTM architecture is defined as an LSTM model comprised of multiple LSTM layers. An LSTM layer above provides a sequence output rather than a single value output to the LSTM layer below. Specifically, one output per input time step, rather than one output time step for all input time steps.
+
+We created Stacked LSTM model using Keras which is a Python deep learning library.
+"""
+
+# Modelling using LSTM
+steps = 50
+
+EPOCHS = 20
+
+single_step_model = tf.keras.models.Sequential()
+
+single_step_model.add(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_ss.shape[-2:]))
+single_step_model.add(tf.keras.layers.Dropout(0.3))
+single_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+single_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+single_step_model.add(tf.keras.layers.Activation("relu"))
+single_step_model.add(tf.keras.layers.Dense(1))
+single_step_model.compile(
+ optimizer=tf.keras.optimizers.Adam(),
+ loss='mae',
+ metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+#single_step_model.compile(loss='mse', optimizer='rmsprop')
+single_step_model_history = single_step_model.fit(
+ train_ss,
+ epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_ss,
+ validation_steps=50)
+
+
+single_step_model.summary()
+
+# plot train test loss
+
+
+def plot_loss(history, title):
+ loss = history.history['loss']
+ val_loss = history.history['val_loss']
+
+ epochs = range(len(loss))
+ plt.figure()
+ plt.plot(epochs, loss, 'b', label='Train Loss')
+ plt.plot(epochs, val_loss, 'r', label='Validation Loss')
+ plt.title(title)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+plot_loss(single_step_model_history,
+ 'Single Step Training and validation loss')
+
+# plot train/validation RMSE (logged under the 'rmse' metric name)
+
+
+def plot_rmse(history, title):
+    loss = history.history['rmse']
+    val_loss = history.history['val_rmse']
+
+    epochs = range(len(loss))
+    plt.figure()
+    plt.plot(epochs, loss, 'b', label='Train RMSE')
+    plt.plot(epochs, val_loss, 'r', label='Validation RMSE')
+    plt.title(title)
+    plt.legend()
+    plt.grid()
+    plt.show()
+
+
+plot_rmse(single_step_model_history,
+          'Single Step Training and validation RMSE')
+
+# function to create time steps
+
+
+def create_time_steps(length):
+ return list(range(-length, 0))
+
+# function to plot time series data
+
+
+def plot_time_series(plot_data, delta, title):
+    labels = ['History', 'True Future', 'Model Predicted']
+ marker = ['.-', 'rx', 'go']
+ time_steps = create_time_steps(plot_data[0].shape[0])
+
+ if delta:
+ future = delta
+ else:
+ future = 0
+ plt.title(title)
+ for i, x in enumerate(plot_data):
+ if i:
+ plt.plot(
+ future,
+ plot_data[i],
+ marker[i],
+ markersize=10,
+ label=labels[i])
+ else:
+ plt.plot(
+ time_steps,
+ plot_data[i].flatten(),
+ marker[i],
+ label=labels[i])
+ plt.legend()
+ plt.xlim([time_steps[0], (future + 5) * 2])
+
+ plt.xlabel('Time_Step')
+ return plt
+
+# Moving window average
+
+
+def MWA(history):
+ return np.mean(history)
+
+# plot time series and predicted values
+
+
+for x, y in val_ss.take(5):
+ plot = plot_time_series([x[0][:, 1].numpy(), y[0].numpy(),
+ single_step_model.predict(x)[0]], 12,
+ 'Single Step Prediction')
+ plot.show()
+
+"""# **MultiStep Forcasting**"""
+
+future_target = 72 # 72 future values
+x_train_multi, y_train_multi = mutlivariate_data(features, features[:, 1], 0,
+ train_split, history,
+ future_target, STEP)
+x_val_multi, y_val_multi = mutlivariate_data(features, features[:, 1],
+ train_split, None, history,
+ future_target, STEP)
+
+print(x_train_multi.shape)
+print(y_train_multi.shape)
+
+# TF DATASET
+
+train_data_multi = tf.data.Dataset.from_tensor_slices(
+ (x_train_multi, y_train_multi))
+train_data_multi = train_data_multi.cache().shuffle(
+ buffer_size).batch(batch_size).repeat()
+
+val_data_multi = tf.data.Dataset.from_tensor_slices((x_val_multi, y_val_multi))
+val_data_multi = val_data_multi.batch(batch_size).repeat()
+
+print(train_data_multi)
+print(val_data_multi)
+
+# plotting function
+
+
+def multi_step_plot(history, true_future, prediction):
+ plt.figure(figsize=(12, 6))
+ num_in = create_time_steps(len(history))
+ num_out = len(true_future)
+ plt.grid()
+ plt.plot(num_in, np.array(history[:, 1]), label='History')
+ plt.plot(np.arange(num_out) / STEP, np.array(true_future), 'bo',
+ label='True Future')
+ if prediction.any():
+ plt.plot(np.arange(num_out) / STEP, np.array(prediction), 'ro',
+ label='Predicted Future')
+ plt.legend(loc='upper left')
+ plt.show()
+
+
+for x, y in train_data_multi.take(1):
+ multi_step_plot(x[0], y[0], np.array([0]))
+
+multi_step_model = tf.keras.models.Sequential()
+multi_step_model.add(tf.keras.layers.LSTM(
+ 32, return_sequences=True, input_shape=x_train_multi.shape[-2:]))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+multi_step_model.add(tf.keras.layers.LSTM(units=100, return_sequences=False))
+multi_step_model.add(tf.keras.layers.Dropout(0.2))
+#model.add(Dense(units=1, activation='relu'))
+multi_step_model.add(tf.keras.layers.Activation("relu"))
+# add dropout layer (0.3)
+multi_step_model.add(tf.keras.layers.Dense(72)) # for 72 outputs
+
+multi_step_model.compile(
+ optimizer=tf.keras.optimizers.RMSprop(
+ clipvalue=1.0), loss='mae', metrics=[
+ tf.keras.metrics.RootMeanSquaredError(
+ name='rmse')])
+
+multi_step_history = multi_step_model.fit(train_data_multi, epochs=EPOCHS,
+ steps_per_epoch=steps,
+ validation_data=val_data_multi,
+ validation_steps=50)
+
+plot_loss(multi_step_history, 'Multi-Step Training and validation loss')
+
+for x, y in val_data_multi.take(5):
+ multi_step_plot(x[0], y[0], multi_step_model.predict(x)[0])
+
+scores = multi_step_model.evaluate(
+    x_train_multi,
+    y_train_multi,
+    verbose=1,
+    batch_size=200)
+print('Train MAE: {}, RMSE: {}'.format(scores[0], scores[1]))
+
+scores_test = multi_step_model.evaluate(
+    x_val_multi, y_val_multi, verbose=1, batch_size=200)
+print('Validation MAE: {}, RMSE: {}'.format(scores_test[0], scores_test[1]))
+
+y_pred_test = multi_step_model.predict(x_val_multi, verbose=0)
+
+# compare the first step of the 72-step forecast horizon against the truth
+plt.figure(figsize=(10, 5))
+plt.plot(y_pred_test[:, 0], label='Predicted')
+plt.plot(y_val_multi[:, 0], label='Actual')
+plt.ylabel("Standardized target value")
+plt.xlabel("Validation sample index")
+plt.legend(loc='upper left')
+plt.show()
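+
+"""A hedged follow-up (not in the original notebook): mean absolute error per
+forecast step across the 72-step horizon, to see how the error grows with the
+lead time:
+"""
+# step_mae = np.mean(np.abs(y_pred_test - y_val_multi), axis=0)
+# plt.figure()
+# plt.plot(step_mae)
+# plt.xlabel('Forecast step')
+# plt.ylabel('MAE (standardized units)')
+# plt.show()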