From 891608fda492dc8cfe6c13d9557d272d2ca70e76 Mon Sep 17 00:00:00 2001
From: Sting <loic.allegre@ensiie.fr>
Date: Thu, 22 Jun 2023 14:19:16 +0200
Subject: [PATCH] Training data preprocessing

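Switch the training pipeline to karaoke-style inputs: each song folder
under ./media/train/ now provides a .wav audio file plus an .ass
subtitle file (loaded with import_ass) instead of the former .ogg/.tja
pair, and test audio is likewise read as .wav from ./media/test/. Drop
the unused music_for_validation and music_for_train_reduced helpers
together with the deletemusic/difficulty options, remove two debug
prints, pass the mel() filter-bank arguments by keyword, and comment
out the @jit decorators. The preprocessed songs are still pickled to
./data/pickles/train_data.pickle.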
---
 music_processor.py | 61 ++++++++--------------------------------------
 1 file changed, 10 insertions(+), 51 deletions(-)

diff --git a/music_processor.py b/music_processor.py
index 0ba18d9..44bd4cd 100644
--- a/music_processor.py
+++ b/music_processor.py
@@ -153,8 +153,6 @@ class Audio:
             for couple in RGX_TAGS.findall(line[2]):
                 self.timestamp.append((lastTime/100, couple[1]))
                 lastTime += int(couple[0])
-        print(type(lastTime))
-        print(self.timestamp)
         self.timestamp = np.array(self.timestamp, dtype='float, object')
 
 
@@ -169,7 +167,7 @@ def make_frame(data, nhop, nfft):
     return np.array([framedata[i*nhop:i*nhop+nfft] for i in range(length//nhop)])  
 
 
-@jit
+#@jit
 def fft_and_melscale(song, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel_freqlo=27.5, mel_freqhi=16000.0, include_zero_cross=False):
     """
     fft and melscale method.
@@ -183,7 +181,7 @@ def fft_and_melscale(song, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel
         
         feats = []
         window = signal.blackmanharris(nfft)
-        filt = mel(song.samplerate, nfft, mel_nband, mel_freqlo, mel_freqhi)
+        filt = mel(sr=song.samplerate, n_fft=nfft, n_mels=mel_nband, fmin=mel_freqlo, fmax=mel_freqhi)
         
         # get normal frame
         frame = make_frame(song.data, nhop, nfft)
@@ -204,7 +202,7 @@ def fft_and_melscale(song, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel
     return np.array(feat_channels)
 
 
-@jit(parallel=True)
+#@jit(parallel=True)
 def multi_fft_and_melscale(songs, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel_freqlo=27.5, mel_freqhi=16000.0, include_zero_cross=False):
     
     for i in prange(len(songs)):
@@ -258,19 +256,8 @@ def smooth(x, window_len=11, window='hanning'):
 
 
 
-def music_for_validation(serv, deletemusic=True, verbose=False, difficulty=1):
 
-    song = Audio(glob(serv+"/*.ogg")[0], stereo=False)
-    song.import_tja(glob(serv+"/*.tja")[-1], difficulty=difficulty)
-    song.feats = fft_and_melscale(song, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel_freqlo=27.5, mel_freqhi=16000.0, include_zero_cross=False)
-
-    if deletemusic:
-        song.data = None
-    with open('./data/pickles/val_data.pickle', mode='wb') as f:
-        pickle.dump(song, f)
-
-
-def music_for_train(serv, deletemusic=True, verbose=False, difficulty=0, diff=False, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel_freqlo=27.5, mel_freqhi=16000.0, include_zero_cross=False):
+def music_for_train(serv, verbose=False, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel_freqlo=27.5, mel_freqhi=16000.0, include_zero_cross=False):
     
     songplaces = glob(serv)
     songs = []
@@ -280,48 +267,20 @@ def music_for_train(serv, deletemusic=True, verbose=False, difficulty=0, diff=Fa
         if verbose:
             print(songplace)
         
-        song = Audio(glob(songplace+"/*.ogg")[0])
-        song.import_tja(glob(songplace+"/*.tja")[-1], difficulty=difficulty, diff=True)
+        song = Audio(glob(songplace+"/*.wav")[0])
+        song.import_ass(glob(songplace+"/*.ass")[-1])
         song.data = (song.data[:, 0]+song.data[:, 1])/2
         songs.append(song)
 
     multi_fft_and_melscale(songs, nhop, nffts, mel_nband, mel_freqlo, mel_freqhi, include_zero_cross=include_zero_cross)
     
-    if deletemusic:
-        for song in songs:
-            song.data = None
-    
     with open('./data/pickles/train_data.pickle', mode='wb') as f:
         pickle.dump(songs, f)
 
-def music_for_train_reduced(serv, deletemusic=True, verbose=False, difficulty=0, diff=False, nhop=512, nffts=[1024, 2048, 4096], mel_nband=80, mel_freqlo=27.5, mel_freqhi=16000.0, include_zero_cross=False):
-    
-    songplaces = glob(serv)
-    songs = []
-    
-    for songplace in songplaces:
-        
-        if verbose:
-            print(songplace)
-        
-        song = Audio(glob(songplace+"/*.ogg")[0])
-        song.import_tja(glob(songplace+"/*.tja")[-1], difficulty=difficulty, diff=True)
-        song.data = (song.data[:, 0]+song.data[:, 1])/2
-        songs.append(song)
-
-    multi_fft_and_melscale(songs, nhop, nffts, mel_nband, mel_freqlo, mel_freqhi, include_zero_cross=include_zero_cross)
-    
-    if deletemusic:
-        for song in songs:
-            song.data = None
-    
-    with open('./data/pickles/train_reduced.pickle', mode='wb') as f:
-        pickle.dump(songs, f)
-
 
 def music_for_test(serv, deletemusic=True, verbose=False):
 
-    song = Audio(glob(serv+"/*.ogg")[0], stereo=False)
+    song = Audio(glob(serv+"/*.wav")[0], stereo=False)
     # song.import_tja(glob(serv+"/*.tja")[-1])
     song.feats = fft_and_melscale(song, include_zero_cross=False)
     with open('./data/pickles/test_data.pickle', mode='wb') as f:
@@ -332,13 +291,13 @@ if __name__ == "__main__":
 
     if sys.argv[1] == 'train':
         print("preparing all train data processing...")
-        serv = "./data/train/*"
-        music_for_train(serv, verbose=True, difficulty=0, diff=True)
+        serv = "./media/train/*"
+        music_for_train(serv, verbose=True)
         print("all train data processing done!")    
 
     if sys.argv[1] == 'test':
         print("test data proccesing...")
-        serv = "./data/test/"
+        serv = "./media/test/"
         music_for_test(serv)
         print("test data processing done!")
 
-- 
GitLab
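
Note (not part of the patch): a minimal sketch of the folder layout that
music_for_train now expects. It only mirrors the glob calls in the diff
above (first .wav, last .ass per song folder) and the ./media/train/*
path used in __main__; the helper loop itself is illustrative.

    from glob import glob

    # music_for_train takes glob(folder + "/*.wav")[0] and
    # glob(folder + "/*.ass")[-1], so every song folder needs at least
    # one of each (and a stereo .wav, since the two channels are
    # averaged). This loop reports folders that would make it fail.
    for songplace in glob("./media/train/*"):
        wavs = glob(songplace + "/*.wav")
        subs = glob(songplace + "/*.ass")
        if not wavs or not subs:
            print(f"{songplace}: expected a .wav and a .ass file")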