main.py 6.79 KB
Newer Older
Hotwani's avatar
Hotwani committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
import librosa.feature
import pandas as pd
import numpy as np
from pathlib import Path
from os import chdir
import os
import csv
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from keras import models
from keras import layers
from sklearn.metrics import classification_report
import constants
Hotwani's avatar
Hotwani committed
15
import sys
Hotwani's avatar
Hotwani committed
16
17
18


def create_csv_header():
    """Create the features CSV with a header row, aborting if artifacts exist.

    Exits the process when either the trained model file or the features CSV
    is already present, so a previous run is never silently overwritten.
    """
    if os.path.isfile(constants.TRAINED_MODEL):
        sys.exit("Trained model file already exists, "
                 "remove/move trained_model.h5 to another location and start training again")
    if os.path.isfile(constants.FEATURES_CSV_NAME):
        sys.exit("features.csv already exist, please remove/move the file to another location and run main.py again")
    # Build the column names directly as a list instead of concatenating a
    # space-separated string and splitting it back apart.
    header = ['filename']
    header.extend(f'mfcc{i}' for i in range(constants.MFCC_RANGE_START, constants.MFCC_RANGE_END))
    header.append('label')
    # 'x' mode fails loudly if the file appeared between the check and the open.
    with open(constants.FEATURES_CSV_NAME, 'x', newline='') as csv_file:
        csv.writer(csv_file).writerow(header)
Hotwani's avatar
Hotwani committed
34
35
36
37
38
39


def extract_features(trainingDataDir, trainingDataSubDirs):
    """Extract MFCC mean features from every .wav file and append them to the CSV.

    Parameters
    ----------
    trainingDataDir : pathlib.Path
        Root directory containing the class subdirectories.
    trainingDataSubDirs : iterable of str
        Names of the class subdirectories to scan for .wav files.
    """
    create_csv_header()
    # Map each class subdirectory to its label constant (replaces the if/elif chain).
    label_by_sub_dir = {
        constants.CAR: constants.LIGHT_WEIGHT,
        constants.BUS: constants.MEDIUM_WEIGHT,
        constants.TRUCK: constants.HEAVY_WEIGHT,
        constants.MOTORCYCLE: constants.TWO_WHEELED,
        constants.TRAM: constants.RAIL_BOUND,
    }
    # Looping over every file inside the subdirectories for feature extraction
    for trainingDataSubDir in trainingDataSubDirs:
        sub_dir_path = trainingDataDir / trainingDataSubDir
        # Open the CSV once per subdirectory instead of once per audio file.
        with open(constants.FEATURES_CSV_NAME, 'a', newline='') as csv_file:
            writer = csv.writer(csv_file)
            for audio_file_name in os.listdir(sub_dir_path):
                if not audio_file_name.endswith(".wav"):
                    continue
                audio_file = sub_dir_path / audio_file_name
                print("Extracting Features from Directory " + trainingDataSubDir + " and file " + audio_file.name)
                y, sr = librosa.load(audio_file, mono=True)
                mfcc_features = librosa.feature.mfcc(y=y, sr=sr,
                                                     n_mfcc=(constants.MFCC_RANGE_END - constants.MFCC_RANGE_START))
                # Build the row as a list so a filename containing spaces stays a
                # single column (the old join-then-split approach split it apart).
                row = [audio_file.name]
                row.extend(np.mean(mfcc_segment) for mfcc_segment in mfcc_features)
                label = label_by_sub_dir.get(trainingDataSubDir)
                if label is not None:
                    row.append(label)
                writer.writerow(row)


def preprocessing_csv_data():
    """Load the features CSV and drop the non-numeric filename column.

    Returns
    -------
    pandas.DataFrame
        The MFCC feature columns followed by the trailing label column.
    """
    features_data = pd.read_csv(constants.FEATURES_CSV_NAME)
    # The filename is only an identifier; it carries no signal for training.
    # (The original's bare .head() calls were no-ops and have been removed.)
    return features_data.drop(['filename'], axis=1)
Hotwani's avatar
Hotwani committed
74
75


Hotwani's avatar
Hotwani committed
76
def encode_labels(processedFeaturesData):
    """Convert the string class labels in the last column to integer codes.

    Returns a tuple of (encoded label array, fitted LabelEncoder) so the
    caller can later map numeric predictions back to class names.
    """
    label_encoder = LabelEncoder()
    raw_labels = processedFeaturesData.iloc[:, -1]
    encoded_labels = label_encoder.fit_transform(raw_labels)
    return encoded_labels, label_encoder
Hotwani's avatar
Hotwani committed
82
83


Hotwani's avatar
Hotwani committed
84
def normalize_data(processedData):
    """Standardize every feature column to zero mean and unit variance.

    The trailing label column is excluded; only the feature columns are scaled.
    """
    feature_matrix = np.array(processedData.iloc[:, :-1], dtype=float)
    return StandardScaler().fit_transform(feature_matrix)
Hotwani's avatar
Hotwani committed
89
90


Hotwani's avatar
Hotwani committed
91
def train_test_data_split(XInput, yLabels):
    """Split features and labels into train/test sets per the configured ratio."""
    split_result = train_test_split(XInput, yLabels,
                                    test_size=constants.TEST_DATA_SPLIT)
    X_split_train, X_split_test, y_split_train, y_split_test = split_result
    return X_split_train, X_split_test, y_split_train, y_split_test
Hotwani's avatar
Hotwani committed
96
97
98
99


def create_and_compile_model(input_dimensions=None):
    """Build and compile the dense classification network.

    Parameters
    ----------
    input_dimensions : int, optional
        Number of input features for the first layer. Defaults to the width
        of the module-level ``X_input_features`` matrix for backward
        compatibility; pass it explicitly to remove the hidden dependency
        on that global.

    Returns
    -------
    A compiled keras Sequential model.
    """
    if input_dimensions is None:
        # Fall back to the global produced by normalize_data() at script level.
        input_dimensions = X_input_features.shape[1]

    print("Creating a Model")
    model_instance = models.Sequential()
    model_instance.add(layers.Dense(constants.HIDDEN_LAYER_1_DIMENSIONS, activation=constants.ACTIVATION_RELU,
                                    input_shape=(input_dimensions,)))
    model_instance.add(layers.Dense(constants.HIDDEN_LAYER_2_DIMENSIONS, activation=constants.ACTIVATION_RELU))
    model_instance.add(layers.Dense(constants.HIDDEN_LAYER_3_DIMENSIONS, activation=constants.ACTIVATION_RELU))
    model_instance.add(layers.Dense(constants.OUTPUT_LAYER_DIMENSIONS, activation=constants.ACTIVATION_SOFTMAX))

    print("Compiling a Model")
    model_instance.compile(optimizer=constants.OPTIMIZER_ADAM,
                           loss=constants.LOSS_FUNCTION_SPARSE,
                           metrics=[constants.ACCURACY_METRICS])
    return model_instance
Hotwani's avatar
Hotwani committed
112
113


Hotwani's avatar
Hotwani committed
114
115
116
117
def train_and_save_model(compiledModel, X_train, y_train, X_test, y_test, epochs=35):
    """Train the model with TensorBoard logging and save it to disk.

    Parameters
    ----------
    compiledModel : keras model
        Model returned by create_and_compile_model().
    X_train, y_train : training features and labels.
    X_test, y_test : validation features and labels.
    epochs : int, optional
        Number of training epochs (previously hard-coded to 35).

    Returns
    -------
    The keras History object produced by fit().
    """
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=constants.LOG_DIR_PATH)

    print("Start Training...")
    training_history = compiledModel.fit(X_train, y_train, epochs=epochs,
                                         validation_data=(X_test, y_test),
                                         callbacks=[tensorboard_callback])

    # Saving the trained model to avoid re-training
    compiledModel.save(constants.TRAINED_MODEL)
    return training_history
Hotwani's avatar
Hotwani committed
127
128
129
130


def predict(X_test, y_test, model=None):
    """Print a classification report for the test split.

    Parameters
    ----------
    X_test : normalized feature matrix for the test split.
    y_test : integer-encoded true labels for the test split.
    model : keras model, optional
        Model to evaluate. Defaults to the module-level ``compiled_model``
        for backward compatibility with existing callers.
    """
    if model is None:
        # Fall back to the global assigned at script level.
        model = compiled_model
    print("Predictions.....")
    final_predictions = np.argmax(model.predict(X_test), axis=-1)
    # NOTE(review): target_names must match LabelEncoder's (alphabetical)
    # class ordering for the report rows to be labelled correctly — verify
    # against the encoder returned by encode_labels().
    target_names = [constants.LIGHT_WEIGHT, constants.MEDIUM_WEIGHT, constants.HEAVY_WEIGHT, constants.TWO_WHEELED,
                    constants.RAIL_BOUND]
    print(classification_report(y_test, final_predictions, target_names=target_names))
Hotwani's avatar
Hotwani committed
135
136
137
138


# ---- Script entry: end-to-end feature extraction, training and evaluation ----
# Change into the training dataset folder so relative paths (features CSV,
# trained model) resolve there.
chdir(constants.TRAINING_DATA_DIRECTORY_NAME)
training_data_directory = Path.cwd()
training_data_sub_directories = os.listdir(training_data_directory)
# 1. Extract MFCC features from every audio file into the features CSV.
extract_features(training_data_directory, training_data_sub_directories)
# 2. Load the CSV and drop the filename column.
processed_features_data = preprocessing_csv_data()
# 3. Encode string labels as integers; keep the encoder for inverse lookups.
target_audio_labels, encoder_object = encode_labels(processed_features_data)
# 4. Standardize the feature matrix. This global is also read by
#    create_and_compile_model() to size the input layer.
X_input_features = normalize_data(processed_features_data)
# 5. Split into train/test sets.
X_train_data, X_test_data, y_train_data, y_test_data = train_test_data_split(X_input_features, target_audio_labels)
# 6. Build, train, save, and evaluate the network. predict() reads the
#    compiled_model global.
compiled_model = create_and_compile_model()
model_training_history = train_and_save_model(compiled_model, X_train_data, y_train_data, X_test_data, y_test_data)
predict(X_test_data, y_test_data)