# test.py — extract MFCC features from the test audio files and classify
# them with the pre-trained Keras model.
import librosa.feature
import pandas as pd
import numpy as np
import os
from pathlib import Path
import csv
from tensorflow import keras
from sklearn.preprocessing import LabelEncoder, StandardScaler
import constants
import sys
def create_csv_header():
    """Create constants.TEST_CSV_NAME containing a single 'mfcc<i>' header row.

    Exits the program if the file already exists so results from an
    earlier run are never silently overwritten.
    """
    if os.path.isfile(constants.TEST_CSV_NAME):
        sys.exit("test.csv already exists, please remove/move the file to another location and run test.py again")
    # One column per MFCC coefficient: mfcc<start> .. mfcc<end-1>.
    header = [f'mfcc{i}' for i in range(constants.MFCC_RANGE_START, constants.MFCC_RANGE_END)]
    # 'x' mode still fails fast if the file appeared between the check above and now.
    with open(constants.TEST_CSV_NAME, 'x', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(header)
def extract_features(workingDir, subDirectories):
    """Append one mean-MFCC row per .wav file to constants.TEST_CSV_NAME.

    Args:
        workingDir: pathlib.Path of the directory containing the data folders.
        subDirectories: iterable of entry names; only the one matching
            constants.TESTING_DATA_DIRECTORY_NAME is scanned for .wav files.
    """
    create_csv_header()
    for subDirectory in subDirectories:
        if subDirectory != constants.TESTING_DATA_DIRECTORY_NAME:
            continue
        test_dir = workingDir / subDirectory
        # Open the CSV once and append every row, instead of reopening it
        # for each audio file as the original code did.
        with open(constants.TEST_CSV_NAME, 'a', newline='') as csv_file:
            writer = csv.writer(csv_file)
            for test_audio_file_name in os.listdir(test_dir):
                if not test_audio_file_name.endswith(".wav"):
                    continue
                test_audio_file = test_dir / test_audio_file_name
                # mono=True mixes multi-channel audio down to one channel.
                y, sr = librosa.load(test_audio_file, mono=True)
                mfcc_features = librosa.feature.mfcc(
                    y=y, sr=sr,
                    n_mfcc=(constants.MFCC_RANGE_END - constants.MFCC_RANGE_START))
                # One value per coefficient: the mean over all frames.
                row = [np.mean(mfcc_segment) for mfcc_segment in mfcc_features]
                writer.writerow(row)
def preprocessing_csv_data():
    """Load the extracted feature rows from constants.TEST_CSV_NAME.

    Returns:
        pandas.DataFrame with one row per audio file and one mfcc<i>
        column per coefficient.
    """
    print("Reading Features... ")
    # The original called .head() and discarded the result; removed as dead code.
    return pd.read_csv(constants.TEST_CSV_NAME)
def normalize_data(processedData):
    """Standardize every feature column to zero mean and unit variance.

    Args:
        processedData: DataFrame of raw MFCC feature columns.

    Returns:
        numpy.ndarray of the scaled feature matrix.
    """
    raw_matrix = np.array(processedData.iloc[:, :], dtype=float)
    return StandardScaler().fit_transform(raw_matrix)
# ---- script entry: extract features, normalize, classify, report ----
working_directory = Path.cwd()
sub_directories = os.listdir(working_directory)
extract_features(working_directory, sub_directories)
processed_data = preprocessing_csv_data()
X_test_data = normalize_data(processed_data)

TRAINED_MODEL_PATH = './DemoTrainingDataset/Trained_Model/trained_model.h5'
if os.path.isfile(TRAINED_MODEL_PATH):
    model = keras.models.load_model(TRAINED_MODEL_PATH)
else:
    sys.exit("Trained model file does not exist")

# Highest-probability class index for each test sample.
predictions = np.argmax(model.predict(X_test_data), axis=-1)

# Map class indices back to human-readable names.
# NOTE(review): LabelEncoder assigns indices to labels in sorted order, so
# the trained model must have been trained with the same encoding — confirm
# against the training script.
encoder = LabelEncoder()
labels = ['Light-Weight', 'Medium-Weight', 'Heavy-Weight', 'Two-Wheeled', 'Rail-Bound']
encoder.fit_transform(labels)
print(predictions)
print(encoder.inverse_transform(predictions))