import os
from os import chdir
from pathlib import Path

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import models, layers, optimizers
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import classification_report

import constants
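
# The `constants` module is not shown in this file. From its usage below it is
# assumed to provide values along these lines (illustrative guesses, not the
# project's actual definitions):
#   FEATURES_CSV_NAME = "features.csv"
#   TRAINING_DATA_DIRECTORY_NAME = "<training data folder>"
#   LOG_DIR_PATH = "<TensorBoard log dir>"
#   TRAINED_MODEL = "<model output path>"
#   N_FEATURE, FEATURE_MAX_LEN, CHANNELS      # input tensor shape for the CNN
#   OUTPUT_LAYER_DIMENSIONS = 5               # one output unit per class name used in predict()
#   LOSS_FUNCTION_SPARSE = "sparse_categorical_crossentropy"
#   ACCURACY_METRICS = "accuracy"
#   LIGHT_WEIGHT, MEDIUM_WEIGHT, HEAVY_WEIGHT, TWO_WHEELED, RAIL_BOUND  # class names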

# Splitting the dataset into training and test sets

def train_test_data_split(X, y):
    # Hold out 20% of the samples as the test set
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
    print(X_train[0].shape)
    return X_train, X_test, y_train, y_test

# creating a model
def create_and_compile_model():
    print("Creating a Model")
    from keras.models import Sequential
    from keras.layers import Conv2D, Dense, MaxPooling2D, Dropout, Flatten, BatchNormalization
    model = models.Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=(constants.N_FEATURE, constants.FEATURE_MAX_LEN, constants.CHANNELS)))
    model.add(Conv2D(32, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dense(constants.OUTPUT_LAYER_DIMENSIONS, activation='softmax'))
    print("Compiling a Model")
    optimizer = keras.optimizers.RMSprop()
    model.compile(optimizer=optimizer, loss=constants.LOSS_FUNCTION_SPARSE, metrics=[constants.ACCURACY_METRICS])
    print(model.summary())
    return model

def preprocessing_csv_data():
    print("Reading Features... ")
    data = pd.read_csv(constants.FEATURES_CSV_NAME)
    # Dropping the unnecessary 'filename' column
    data = data.drop(['filename'], axis=1)
    return data


# Extracting the label column as y from the csv and converting the string
# labels to numbers using LabelEncoder
def encode_labels(data):
    labels = data.iloc[:, -1]
    encoder = LabelEncoder()
    target_labels = encoder.fit_transform(labels)
    return target_labels, encoder


# Normalizing - extracting the remaining columns as X, scaling them to a
# common scale, and reshaping to the CNN's expected input tensor shape
def normalize_data(data):
    scaler = StandardScaler()
    print(data.iloc[:, :-1])
    X = scaler.fit_transform(np.array(data.iloc[:, :-1], dtype=float))
    X = X.reshape(-1, constants.N_FEATURE, constants.FEATURE_MAX_LEN, constants.CHANNELS)
    return X

def train_and_save_model(model, X_train, y_train, X_test, y_test):
    # Log training metrics for TensorBoard (view with: tensorboard --logdir <log dir>)
    logdir = constants.LOG_DIR_PATH
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
    print("Start Training...")
    history = model.fit(X_train, y_train, batch_size=32, epochs=35,
                        validation_data=(X_test, y_test), callbacks=[tensorboard_callback])
    # Saving the trained model to avoid re-training
    model.save(constants.TRAINED_MODEL)
    return history
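
# To reuse the saved model later without re-training, it can be restored with
# the standard Keras API, e.g.:
#   model = keras.models.load_model(constants.TRAINED_MODEL)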

def predict(model, X_test, y_test):
    print("Predictions.....")
    predictions = np.argmax(model.predict(X_test), axis=-1)
    target_names = [constants.LIGHT_WEIGHT, constants.MEDIUM_WEIGHT, constants.HEAVY_WEIGHT,
                    constants.TWO_WHEELED, constants.RAIL_BOUND]
    print(classification_report(y_test, predictions, target_names=target_names))

def plot_model_accuracy(history):
    # Plot graph Model Accuracy
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

def plot_model_loss(history):
    # Plot graph Model Loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper right')
    plt.show()
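
# NOTE: extract_features() is called in the script body below but is not
# defined in this file; it presumably lives elsewhere in the project and is
# expected to write the feature table to constants.FEATURES_CSV_NAME. A
# minimal placeholder sketch of that assumed contract (hypothetical, not the
# project's actual implementation):
def extract_features(training_data_dir, training_data_sub_dirs):
    # Expected behaviour: walk the per-class subfolders, compute a fixed-size
    # feature matrix per file, and append one row per file (features +
    # 'filename' + class label) to constants.FEATURES_CSV_NAME.
    raise NotImplementedError("extract_features() is project-specific; "
                              "see the original repository for the real implementation.")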

# Changing Directory to Training Dataset Folder
chdir(constants.TRAINING_DATA_DIRECTORY_NAME)
trainingDataDir = Path.cwd()
trainingDataSubDirs = os.listdir(trainingDataDir)
chdir("..")
if os.path.isfile(constants.FEATURES_CSV_NAME):
    print(f"{constants.FEATURES_CSV_NAME} already exists, skipping extraction")
else:
    extract_features(trainingDataDir, trainingDataSubDirs)

data = preprocessing_csv_data()
target_labels, encoder = encode_labels(data)
X = normalize_data(data)
X_train, X_test, y_train, y_test = train_test_data_split(X, target_labels)
model = create_and_compile_model()
history = train_and_save_model(model, X_train, y_train, X_test, y_test)
predict(model, X_test, y_test)
plot_model_accuracy(history)
plot_model_loss(history)
