import librosa.feature
import pandas as pd
import numpy as np
import os
from pathlib import Path
import csv
from tensorflow import keras
from sklearn.preprocessing import LabelEncoder, StandardScaler
import constants
import sys


def create_csv_header():
    """Create the output CSV (constants.TEST_CSV_NAME) containing only the
    MFCC column-header row.

    Exits the program if the file already exists, so a previous run's
    results are never silently overwritten.
    """
    if os.path.isfile(constants.TEST_CSV_NAME):
        sys.exit("test.csv already exist, please remove/move the file to another location and run test.py again")
    # One column per MFCC coefficient: mfcc<START> .. mfcc<END-1>
    # (range end is exclusive, matching the n_mfcc count used in extraction).
    header = [f'mfcc{i}' for i in range(constants.MFCC_RANGE_START, constants.MFCC_RANGE_END)]
    # 'x' mode raises if the file appeared between the check above and here.
    with open(constants.TEST_CSV_NAME, 'x', newline='') as csv_file:
        csv.writer(csv_file).writerow(header)


def extract_features(workingDir, subDirectories):
    """Extract mean MFCC features from every .wav file in the test directory
    and append them, one row per file, to constants.TEST_CSV_NAME.

    Args:
        workingDir: pathlib.Path of the directory containing the data folders.
        subDirectories: iterable of directory names; only the one equal to
            constants.TESTING_DATA_DIRECTORY_NAME is processed.
    """
    create_csv_header()
    for subDirectory in subDirectories:
        if subDirectory != constants.TESTING_DATA_DIRECTORY_NAME:
            continue
        # Open the CSV once and append every row, instead of re-opening
        # the file for each audio file as the original did.
        with open(constants.TEST_CSV_NAME, 'a', newline='') as csv_file:
            writer = csv.writer(csv_file)
            for test_audio_file_name in os.listdir(workingDir / subDirectory):
                if not test_audio_file_name.endswith(".wav"):
                    continue
                test_audio_file = workingDir / subDirectory / test_audio_file_name
                y, sr = librosa.load(test_audio_file, mono=True)
                mfcc_features = librosa.feature.mfcc(
                    y=y, sr=sr,
                    n_mfcc=(constants.MFCC_RANGE_END - constants.MFCC_RANGE_START))
                # One row per file: the mean of each MFCC coefficient
                # across all frames of the recording.
                writer.writerow([np.mean(mfcc_segment) for mfcc_segment in mfcc_features])


def preprocessing_csv_data():
    """Load the extracted MFCC features from the test CSV.

    Returns:
        pandas.DataFrame with one row per audio file and one column per
        MFCC coefficient.
    """
    print("Reading Features... ")
    # The original also called .head() here and discarded the result — a
    # no-op, removed.
    return pd.read_csv(constants.TEST_CSV_NAME)


def normalize_data(processedData):
    """Standardize every feature column to zero mean and unit variance.

    Args:
        processedData: DataFrame of raw MFCC features.

    Returns:
        numpy.ndarray of the scaled feature matrix.

    NOTE(review): fit_transform fits the scaler on the *test* data itself;
    for predictions consistent with training, the scaler fitted on the
    training set should normally be reused — confirm against the
    training pipeline.
    """
    scale_object = StandardScaler()
    # The original's .iloc[:, :] was a redundant full-frame selection.
    X_test = scale_object.fit_transform(np.array(processedData, dtype=float))
    return X_test


# --- Inference driver ----------------------------------------------------
# Fail fast: verify the trained model exists *before* extracting features.
# In the original order a missing model aborted the run only after
# test.csv had been written, and the next run would then exit in
# create_csv_header because test.csv already existed.
MODEL_PATH = './DemoTrainingDataset/Trained_Model/trained_model.h5'
if os.path.isfile(MODEL_PATH):
    model = keras.models.load_model(MODEL_PATH)
else:
    sys.exit("Trained model file does not exists")

working_directory = Path.cwd()
sub_directories = os.listdir(working_directory)
extract_features(working_directory, sub_directories)
processed_data = preprocessing_csv_data()
X_test_data = normalize_data(processed_data)
# argmax over the class-probability axis yields integer class indices.
predictions = np.argmax(model.predict(X_test_data), axis=-1)

# Rebuild the label encoding so integer class indices map back to names.
# fit (not fit_transform) is enough — the transformed array was discarded.
# NOTE(review): LabelEncoder assigns indices in *sorted* label order; this
# only matches training if the training script encoded the same label set
# the same way — confirm.
encoder = LabelEncoder()
labels = ['Light-Weight', 'Medium-Weight', 'Heavy-Weight', 'Two-Wheeled', 'Rail-Bound']
encoder.fit(labels)
print(predictions)
print(encoder.inverse_transform(predictions))