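"""Extract MFCC features from the test .wav files, then classify each file
with the previously trained Keras model."""
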
import librosa.feature
import pandas as pd
import numpy as np
import os
from pathlib import Path
import csv
from tensorflow import keras
from sklearn.preprocessing import LabelEncoder, StandardScaler
import constants
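
# `constants` is the project's own module; judging from its use below, it is
# assumed to define MFCC_FEATURE_START, MFCC_FEATURE_END, TEST_CSV_NAME and
# TESTING_DATA_DIRECTORY_NAME.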


def create_csv_header():
    # One column per MFCC coefficient, e.g. mfcc1 ... mfcc19.
    header = [f'mfcc{i}' for i in range(constants.MFCC_FEATURE_START, constants.MFCC_FEATURE_END)]
    with open(constants.TEST_CSV_NAME, 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(header)


def extract_features(workingDir, subDirectories):
    create_csv_header()
    for subDirectory in subDirectories:
        if subDirectory != constants.TESTING_DATA_DIRECTORY_NAME:
            continue
        for fileName in os.listdir(workingDir / subDirectory):
            if not fileName.endswith('.wav'):
                continue
            audioFile = workingDir / subDirectory / fileName
            y, sr = librosa.load(audioFile, mono=True)
            mfcc = librosa.feature.mfcc(
                y=y, sr=sr,
                n_mfcc=constants.MFCC_FEATURE_END - constants.MFCC_FEATURE_START)
            # One row per file: the mean of each MFCC coefficient over time.
            row = [np.mean(g) for g in mfcc]
            with open(constants.TEST_CSV_NAME, 'a', newline='') as file:
                writer = csv.writer(file)
                writer.writerow(row)


def preprocessing_csv_data():
    # Read the extracted features back from the CSV.
    print("Reading Features... ")
    data = pd.read_csv(constants.TEST_CSV_NAME)
    print(data.head())
    return data


def normalize_data(data):
    # Scale all feature columns to zero mean and unit variance.
    # NOTE: this fits a fresh StandardScaler on the test data; strictly, the
    # scaler fitted on the training data should be reused here so that test
    # features are scaled the same way the model saw during training.
    scaler = StandardScaler()
    X = scaler.fit_transform(np.array(data, dtype=float))
    print(X)
    print(X.shape)
    return X


workingDir = Path.cwd()
subDirectories = os.listdir(workingDir)
extract_features(workingDir, subDirectories)
data = preprocessing_csv_data()
X = normalize_data(data)

model = keras.models.load_model('./DemoTrainingDataset/trained_model.h5')
model.summary()
predictions = np.argmax(model.predict(X), axis=-1)

# Map the predicted class indices back to their labels. LabelEncoder sorts
# classes alphabetically, so this mapping is only correct if the training
# script encoded the same label set the same way.
encoder = LabelEncoder()
labels = ['Light-Weight', 'Medium-Weight', 'Heavy-Weight', 'Two-Wheeled', 'Rail-Bound']
encoder.fit(labels)
print(predictions)
print(encoder.inverse_transform(predictions))
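
# Directory layout this script assumes (names taken from the code above; the
# test-data directory name comes from constants.TESTING_DATA_DIRECTORY_NAME):
#
#   ./<TESTING_DATA_DIRECTORY_NAME>/*.wav    audio files to classify
#   ./DemoTrainingDataset/trained_model.h5   model produced by the training script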