git.parisson.com Git - timeside-diadems.git/commitdiff
chore(analyzerResults): Results are now stored in pipe.results as a dictionary...
author    Thomas Fillon <thomas@parisson.com>
Tue, 16 Sep 2014 14:14:57 +0000 (16:14 +0200)
committer Thomas Fillon <thomas@parisson.com>
Tue, 16 Sep 2014 14:14:57 +0000 (16:14 +0200)
timeside/analyzer/irit_monopoly.py
timeside/analyzer/irit_noise_startSilences.py
timeside/analyzer/irit_speech_4hz.py
timeside/analyzer/irit_speech_entropy.py
timeside/analyzer/limsi_sad.py

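The change follows the same pattern in every file touched: parent processors are registered in the self.parents dictionary under an explicit key instead of being appended to a list, a parent's results are read from pipe.results under the parent's uuid, and an analyzer stores its own results through add_result(). Below is a minimal sketch of the new convention, assuming an Analyzer subclass with an AubioPitch parent; the class name and import paths are illustrative assumptions, while the keys, result id and calls are the ones used in the diffs that follow.

from timeside.analyzer.core import Analyzer          # import paths assumed for this TimeSide version
from timeside.analyzer.aubio_pitch import AubioPitch

class ExampleAnalyzer(Analyzer):

    def __init__(self):
        super(ExampleAnalyzer, self).__init__()
        # parents is now a dictionary: register the parent under a chosen key
        # (previously: self.parents.append(parent))
        self.parents['aubio_pitch'] = AubioPitch()

    def post_process(self):
        # pipe.results is now keyed by each parent's uuid; the per-parent entry
        # maps result ids to that parent's results
        # (previously: self.process_pipe.results.get_result_by_id(...))
        aubio_uuid = self.parents['aubio_pitch'].uuid()
        aubio_results = self.process_pipe.results[aubio_uuid]
        confidences = aubio_results['aubio_pitch.pitch_confidence'].data

        # an analyzer's own results are stored through the add_result helper
        # (previously: self.process_pipe.results.add(result))
        conf = self.new_result(data_mode='value', time_mode='framewise')
        conf.data_object.value = confidences
        self.add_result(conf)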
timeside/analyzer/irit_monopoly.py
index 03fd9ce5dd1b7c1a7253796fd7097c3af8847229..2b6c52b5271bb205b030d5b483261cdd7d8f5124 100644
@@ -47,7 +47,7 @@ class IRITMonopoly(Analyzer):
 
         self._aubio_pitch_analyzer = AubioPitch(blocksize_s=self.wLen,
                                                 stepsize_s=self.wStep)
-        self.parents.append(self._aubio_pitch_analyzer)
+        self.parents['aubio_pitch'] = self._aubio_pitch_analyzer
 
     @interfacedoc
     def setup(self, channels=None, samplerate=None,
@@ -87,8 +87,10 @@ class IRITMonopoly(Analyzer):
 
         '''
         aubio_res_id = 'aubio_pitch.pitch_confidence'
-        pipe_results = self.process_pipe.results
-        pitch_confidences = pipe_results.get_result_by_id(aubio_res_id).data
+        aubio_uuid = self.parents['aubio_pitch'].uuid()
+        aubio_results = self.process_pipe.results[aubio_uuid]
+
+        pitch_confidences = aubio_results[aubio_res_id].data
 
         nb_frameDecision = int(self.decisionLen / self.wStep)
         epsilon = numpy.spacing(pitch_confidences[0])
@@ -110,7 +112,7 @@ class IRITMonopoly(Analyzer):
         conf.id_metadata.name += ' ' + 'Yin Confidence'
         conf.data_object.value = pitch_confidences
 
-        self.process_pipe.results.add(conf)
+        self.add_result(conf)
 
         convert = {False: 0, True: 1}
         label = {0: 'Poly', 1: 'Mono'}
@@ -126,7 +128,7 @@ class IRITMonopoly(Analyzer):
 
         segs.data_object.duration = [(float(s[1] - s[0]+1) * self.decisionLen)
                                      for s in segList]
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
         return
 
     def monoLikelihood(self, m, v):
timeside/analyzer/irit_noise_startSilences.py
index 6ec93f73d56928f485c5f513b2a37c29ae00cb2c..22f54cb16d6767981720303de86e81c6f82d09d9 100644
@@ -192,7 +192,7 @@ class IRITStartSeg(Analyzer):
                                  for s in segsList]
         segs.data_object.duration = [(float(s[1] - s[0]) * step)
                                      for s in segsList]
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
     def release(self):
         self._buffer.close()
timeside/analyzer/irit_speech_4hz.py
index 18ad3915d120c124e9ec2b402ce9ad248c5ca5ed..a64c4ff2d7ad222dd99ac21946b3959148e72cb2 100644
@@ -145,7 +145,7 @@ class IRITSpeech4Hz(Analyzer):
 
         modEnergy.data_object.value = conf
 
-        self.process_pipe.results.add(modEnergy)
+        self.add_result(modEnergy)
 
         # Segment
         convert = {False: 0, True: 1}
@@ -175,7 +175,7 @@ class IRITSpeech4Hz(Analyzer):
                                      self.samplerate())
                                      for s in segList]
 
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
         # Median filter on decision
         segs = self.new_result(data_mode='label', time_mode='segment')
@@ -192,7 +192,7 @@ class IRITSpeech4Hz(Analyzer):
                                      self.samplerate())
                                      for s in segList_filt]
 
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
 
 
timeside/analyzer/irit_speech_entropy.py
index b33f7bfd704e342985fc0e181535b0a512231ac2..4134e5410754e761d1da0cc393528cbf9b712d13 100644
@@ -80,7 +80,7 @@ class IRITSpeechEntropy(Analyzer):
         conf.id_metadata.name += ' ' + 'Confidence'
 
         conf.data_object.value = confEntropy
-        self.process_pipe.results.add(conf)
+        self.add_result(conf)
 
         # Binary Entropy
         binaryEntropy = modulentropy > self.threshold
@@ -105,6 +105,6 @@ class IRITSpeechEntropy(Analyzer):
                                      self.samplerate())
                                      for s in segList]
 
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
         return
timeside/analyzer/limsi_sad.py
index ed7e9243d2c67144e71ba6641c0ca730c9f1d6d4..ba18b83dfba5f1e52e06a127930adc060a894b04 100644
@@ -83,7 +83,7 @@ class LimsiSad(Analyzer):
             'mfccd2: MFCC CepsIgnoreFirstCoeff=0 blockSize=1024 stepSize=256 > Derivate DOrder=2')
         spec.addFeature('zcr: ZCR blockSize=1024 stepSize=256')
         parent_analyzer = get_processor('yaafe')(spec)
-        self.parents.append(parent_analyzer)
+        self.parents['yaafe'] = parent_analyzer
 
         # informative parameters
         # these are not really taken into account by the system
@@ -123,15 +123,11 @@ class LimsiSad(Analyzer):
         return frames, eod
 
     def post_process(self):
-        yaafe_result = self.process_pipe.results
-        mfcc = yaafe_result.get_result_by_id(
-            'yaafe.mfcc')['data_object']['value']
-        mfccd1 = yaafe_result.get_result_by_id(
-            'yaafe.mfccd1')['data_object']['value']
-        mfccd2 = yaafe_result.get_result_by_id(
-            'yaafe.mfccd2')['data_object']['value']
-        zcr = yaafe_result.get_result_by_id(
-            'yaafe.zcr')['data_object']['value']
+        yaafe_result = self.process_pipe.results[self.parents['yaafe'].uuid()]
+        mfcc = yaafe_result['yaafe.mfcc']['data_object']['value']
+        mfccd1 = yaafe_result['yaafe.mfccd1']['data_object']['value']
+        mfccd2 = yaafe_result['yaafe.mfccd2']['data_object']['value']
+        zcr = yaafe_result['yaafe.zcr']['data_object']['value']
 
         features = np.concatenate((mfcc, mfccd1, mfccd2, zcr), axis=1)
 
@@ -143,4 +139,4 @@ class LimsiSad(Analyzer):
         sad_result.id_metadata.name += ' ' + \
             'Speech Activity Detection Log Likelihood Difference'
         sad_result.data_object.value = res
-        self.process_pipe.results.add(sad_result)
+        self.add_result(sad_result)
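
For downstream code, a hedged usage sketch of reading results after this change; the attribute-style imports, the audio file name and the default LimsiSad arguments are assumptions, while pipe.results keyed by processor uuid is what this commit introduces.

import timeside

decoder = timeside.decoder.FileDecoder('sweep.wav')   # any audio file
sad = timeside.analyzer.LimsiSad()                     # constructor defaults assumed
pipe = (decoder | sad)
pipe.run()

# pipe.results is now a dictionary keyed by each processor's uuid;
# the per-processor entry maps result ids to the corresponding results
sad_results = pipe.results[sad.uuid()]
print(sad_results)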