I am new to ML and working on a 1D-CNN-based denoising autoencoder for time-series ECG data. I have tried different learning rates and batch sizes, but see no significant improvement. Could anyone please point out what I am doing wrong? Thank you in advance.
The training/testing database contains 48 half-hour excerpts of two-channel ambulatory ECG recordings. Data source: https://www.kaggle.com/shayanfazeli/heartbeat
Number of Samples: 109446
Number of Categories: 5
Sampling Frequency: 125 Hz
Data Source: PhysioNet's MIT-BIH Arrhythmia Dataset
Classes: {'N': 0, 'S': 1, 'V': 2, 'F': 3, 'Q': 4}
Each time-series sample contains 188 features plus its label. Note that the original dataset has 187 features per sample; I duplicated the last column to make it 188, so that the sequence length maps back to itself through the autoencoder's pooling and upsampling stages.
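For reference, here is a minimal sketch of that padding step (hypothetical; the CSV files read below are assumed to already contain the extra column):

import pandas as pd

raw = pd.read_csv('mitbih_train.csv', header=None)  # 187 signal columns + label
raw.insert(187, 'pad', raw[186])   # duplicate the last signal column
raw.columns = range(raw.shape[1])  # signal in columns 0-187, label in column 188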
import warnings

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.utils import to_categorical

from sklearn.metrics import classification_report, confusion_matrix, f1_score
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.utils import class_weight, resample

warnings.filterwarnings('ignore')
# Load the MIT-BIH train/test CSVs (last column holds the class label).
train_df = pd.read_csv('mitbih_train.csv', header=None)
test_df = pd.read_csv('mitbih_test.csv', header=None)

# Balance the training set: downsample class 0 and upsample classes 1-4
# so that every class has 20000 samples.
df_1 = train_df[train_df[188] == 1]
df_2 = train_df[train_df[188] == 2]
df_3 = train_df[train_df[188] == 3]
df_4 = train_df[train_df[188] == 4]
df_0 = train_df[train_df[188] == 0].sample(n=20000, random_state=42)
df_1_upsample = resample(df_1, replace=True, n_samples=20000, random_state=123)
df_2_upsample = resample(df_2, replace=True, n_samples=20000, random_state=124)
df_3_upsample = resample(df_3, replace=True, n_samples=20000, random_state=125)
df_4_upsample = resample(df_4, replace=True, n_samples=20000, random_state=126)
df = pd.concat([df_0, df_1_upsample, df_2_upsample, df_3_upsample, df_4_upsample])
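A quick sanity check (my addition, optional) confirms the balancing worked:

print(df[188].value_counts())  # each of the five classes should report 20000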
def add_gaussian_noise(signal):
    # Corrupt a beat with zero-mean Gaussian noise (standard deviation 0.5).
    noise = np.random.normal(0, 0.5, 187)
    return signal + noise
target_train = df.iloc[:, -1]
target_test = test_df.iloc[:, -1]  # test labels must come from test_df, not df
y_train = to_categorical(target_train)
y_test = to_categorical(target_test)
X_train = df.iloc[:, :-1].values
X_test = test_df.iloc[:, :-1].values
X_train_noise = np.array(X_train)
X_test_noise = np.array(X_test)
# Plot one clean training beat for reference.
plt.plot(X_train[10])
plt.show()
# Corrupt the first 187 samples of every beat with Gaussian noise
# (the duplicated 188th column is left untouched).
for i in range(len(X_train_noise)):
    X_train_noise[i, :187] = add_gaussian_noise(X_train_noise[i, :187])
for i in range(len(X_test_noise)):
    X_test_noise[i, :187] = add_gaussian_noise(X_test_noise[i, :187])
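Incidentally, the two loops above could be replaced by one vectorized call per array, which is equivalent up to the random draw and much faster (a sketch; run it instead of the loops, not in addition to them):

X_train_noise[:, :187] += np.random.normal(0, 0.5, (len(X_train_noise), 187))
X_test_noise[:, :187] += np.random.normal(0, 0.5, (len(X_test_noise), 187))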
# Reshape to (samples, 188, 1), the (batch, steps, channels) layout Conv1D expects.
X_train_noise = X_train_noise.reshape(len(X_train_noise), X_train_noise.shape[1], 1)
X_train = X_train.reshape(len(X_train), X_train.shape[1], 1)
X_test = X_test.reshape(len(X_test), X_test.shape[1], 1)
X_test_noise = X_test_noise.reshape(len(X_test_noise), X_test_noise.shape[1], 1)
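A short shape check (my addition) catches reshape mistakes early:

for name, arr in [('X_train', X_train), ('X_train_noise', X_train_noise),
                  ('X_test', X_test), ('X_test_noise', X_test_noise)]:
    print(name, arr.shape)  # expected: (n, 188, 1)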
# --------------------------- Autoencoder design ---------------------------
def encoder(input_data):
    # Encoder: five conv + pool stages. The first conv uses 'valid'
    # padding, shrinking the sequence from 188 to 186 before pooling.
    x = layers.Conv1D(32, kernel_size=3, activation='relu', name='input')(input_data)
    x = layers.MaxPooling1D(2, padding='same')(x)
    x = layers.Conv1D(64, kernel_size=3, activation='relu', padding='same')(x)
    x = layers.MaxPooling1D(2, padding='same')(x)
    x = layers.Conv1D(64, kernel_size=3, activation='relu', padding='same')(x)
    x = layers.MaxPooling1D(2, padding='same')(x)
    x = layers.Conv1D(8, kernel_size=3, activation='relu', padding='same')(x)
    x = layers.MaxPooling1D(2, padding='same')(x)
    x = layers.Conv1D(4, kernel_size=3, activation='relu', padding='same')(x)
    encoded = layers.MaxPooling1D(2, padding='same')(x)
    return encoded
def decoder(encoded):
    # Decoder: mirror of the encoder. The penultimate conv uses 'valid'
    # padding (96 -> 94) so the final upsample restores length 188.
    x = layers.Conv1D(4, kernel_size=3, activation='relu', padding='same')(encoded)
    x = layers.UpSampling1D(2)(x)
    x = layers.Conv1D(8, kernel_size=3, activation='relu', padding='same')(x)
    x = layers.UpSampling1D(2)(x)
    x = layers.Conv1D(32, kernel_size=3, activation='relu', padding='same')(x)
    x = layers.UpSampling1D(2)(x)
    x = layers.Conv1D(64, kernel_size=3, activation='relu', padding='same')(x)
    x = layers.UpSampling1D(2)(x)
    x = layers.Conv1D(64, kernel_size=3, activation='relu')(x)
    x = layers.UpSampling1D(2)(x)
    decoded = layers.Conv1D(1, 3, activation='relu', padding='same')(x)
    return decoded
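Assuming an input length of 188, the sequence lengths run 188 -> 186 -> 93 -> 47 -> 24 -> 12 -> 6 through the encoder and 6 -> 12 -> 24 -> 48 -> 96 -> 94 -> 188 through the decoder, so the output matches the input shape. A one-line check (my addition):

probe = keras.Input(shape=(188, 1))
assert keras.Model(probe, decoder(encoder(probe))).output_shape == (None, 188, 1)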
input_data = keras.Input(shape=(X_train_noise.shape[1], 1))
autoencoder = keras.Model(input_data, decoder(encoder(input_data)))
opt = keras.optimizers.Adam(learning_rate=0.001)
# Pass the configured optimizer object; the string 'adam' would silently
# ignore the learning rate set above.
autoencoder.compile(optimizer=opt, loss='mse')
autoencoder.summary()
trained_autoencoder = autoencoder.fit(
    X_train_noise, X_train,
    epochs=50,
    batch_size=256,
    shuffle=True,
    validation_data=(X_test_noise, X_test),
    callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])
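To see whether training is actually stalling, I would plot the loss curves and compare one reconstruction against its clean target (my addition, a diagnostic sketch):

plt.plot(trained_autoencoder.history['loss'], label='train loss')
plt.plot(trained_autoencoder.history['val_loss'], label='val loss')
plt.legend()
plt.show()

denoised = autoencoder.predict(X_test_noise[:1])
plt.plot(X_test_noise[0].squeeze(), label='noisy input')
plt.plot(denoised[0].squeeze(), label='reconstruction')
plt.plot(X_test[0].squeeze(), label='clean target')
plt.legend()
plt.show()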