git.parisson.com Git - timeside.git/commitdiff
chore(analyzerResults): Results are now stored in pipe.results as a dictionary...
author    Thomas Fillon <thomas@parisson.com>
          Tue, 16 Sep 2014 14:14:57 +0000 (16:14 +0200)
committer Thomas Fillon <thomas@parisson.com>
          Tue, 16 Sep 2014 14:14:57 +0000 (16:14 +0200)
26 files changed:
doc/source/tutorial/frames_stack.rst
tests/test_analyzer_dc.py
tests/test_analyzer_level.py
tests/test_analyzers_default.py
tests/test_analyzers_stress.py
tests/test_decoding_stack.py
timeside/analyzer/aubio/aubio_melenergy.py
timeside/analyzer/aubio/aubio_mfcc.py
timeside/analyzer/aubio/aubio_pitch.py
timeside/analyzer/aubio/aubio_specdesc.py
timeside/analyzer/aubio/aubio_temporal.py
timeside/analyzer/core.py
timeside/analyzer/dc.py
timeside/analyzer/irit_monopoly.py
timeside/analyzer/irit_noise_startSilences.py
timeside/analyzer/irit_speech_4hz.py
timeside/analyzer/irit_speech_entropy.py
timeside/analyzer/level.py
timeside/analyzer/limsi_sad.py
timeside/analyzer/odf.py
timeside/analyzer/spectrogram.py
timeside/analyzer/vamp_plugin.py
timeside/analyzer/waveform.py
timeside/analyzer/yaafe.py
timeside/core.py
timeside/grapher/render_analyzers.py

index c89d49c54597ee31c5327db010fc3bb655c14bb1..b14002f5cdf3ffedd8405920de0d76dc5599280d 100644 (file)
@@ -20,16 +20,15 @@ Then let's setup a :class:`FileDecoder <timeside.decoder.file.FileDecoder>` with
 
 Setup an arbitrary analyzer to check that decoding process from file and from stack are equivalent:
 
->>> pitch_on_file = get_processor('aubio_pitch')()
->>> pipe = (decoder | pitch_on_file)
+>>> pitch = get_processor('aubio_pitch')()
+>>> pipe = (decoder | pitch)
 >>> print pipe.processors #doctest: +ELLIPSIS
-[<timeside.decoder.file.FileDecoder object at 0x...>, <timeside.analyzer.aubio.aubio_pitch.AubioPitch object at 0x...>]
+[gst_dec-{}, aubio_pitch-{}]
 
-After the pipe has been run, the other processes of the pipe are removed from the pipe and only the :class:`FileDecoder <timeside.decoder.file.FileDecoder>` is kept :
+
+Run the pipe:
 
 >>> pipe.run()
->>> print pipe.processors #doctest: +ELLIPSIS
-[<timeside.decoder.file.FileDecoder object at 0x...>]
 
 The processed frames are stored in the pipe attribute `frames_stack` as a list of frames :
 
@@ -47,23 +46,3 @@ Last frame :
 (array([[...]], dtype=float32), True)
 
 If the pipe is used for a second run, the processed frames stored in the stack are passed to the other processors without decoding the audio source again.
-Let's define a second analyzer equivalent to the previous one:
-
->>> pitch_on_stack = get_processor('aubio_pitch')()
-
-Add it to the pipe:
-
->>> pipe |= pitch_on_stack
->>> print pipe.processors #doctest: +ELLIPSIS
-[<timeside.decoder.file.FileDecoder object at 0x...>, <timeside.analyzer.aubio.aubio_pitch.AubioPitch object at 0x...>]
-
-And run the pipe:
-
->>> pipe.run()
-
-Assert that the frames passed to the two analyzers are the same, we check that the results of these analyzers are equivalent:
-
->>> np.allclose(pitch_on_file.results.get_result_by_id('aubio_pitch.pitch').data,
-...                    pitch_on_stack.results.get_result_by_id('aubio_pitch.pitch').data)
-True
-
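To make the new access pattern concrete, here is a minimal sketch of the revised tutorial flow as plain Python (the audio file name is a placeholder; import paths follow the classes referenced in this diff):

    from timeside.core import get_processor
    from timeside.decoder.file import FileDecoder

    decoder = FileDecoder('sweep.wav', stack=True)   # placeholder file, frames kept on the stack
    pitch = get_processor('aubio_pitch')()
    pipe = (decoder | pitch)
    pipe.run()

    # Results now live on the analyzer itself, keyed by result id,
    # instead of in a single pipe-wide container:
    pitch_data = pitch.results['aubio_pitch.pitch'].data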
index 7c49e890d48db03cf7dbc416151f9bf722b08525..c76522638b5db7f5e4264240eafffbd5626ac3ca 100755 (executable)
@@ -29,7 +29,7 @@ class TestAnalyzerDC(unittest.TestCase):
         (decoder | self.analyzer).run()
         results = self.analyzer.results
         for result_id in self.expected.keys():
-            result = results.get_result_by_id(result_id)
+            result = results[result_id]
             self.assertEquals(result.data_object.value,
                               self.expected[result_id])
 
index 9bc56e1e2fac30ed66c7737715de5be564008e6f..510ef6c6143f852df8cf954cb7ada8bf8f3184c0 100755 (executable)
@@ -38,7 +38,7 @@ class TestAnalyzerLevel(unittest.TestCase):
         (decoder | self.analyzer).run()
         results = self.analyzer.results
         for result_id in self.expected.keys():
-            result = results.get_result_by_id(result_id)
+            result = results[result_id]
             self.assertEquals(result.data_object.value,
                               self.expected[result_id])
         #print results
index 98455cf1b174602262b0d14db49ca4d0146acbf3..12434c6214851175d4e56c58828cf2515da63b0f 100755 (executable)
@@ -23,9 +23,10 @@ class TestAnalyzers_with_default(unittest.TestCase):
         """Internal function that test if there is NaN in the results
         of a given analyzer"""
 
-        pipe = (self.decoder | analyzer_cls())
+        analyzer = analyzer_cls()
+        pipe = (self.decoder | analyzer)
         pipe.run()
-        for key, result in pipe.results.items():
+        for key, result in analyzer.results.items():
             if 'value' in result.data_object.keys():
                 # Test for NaN
                 self.assertFalse(np.any(np.isnan(result.data)),
index 3bb43be0d32fe7201ba431e6193b8a2130a36500..9669643f062a5b0ad2e6e8af83e3ea441f5f03f3 100755 (executable)
@@ -23,9 +23,10 @@ class TestAnalyzers_with_zeros(unittest.TestCase):
         """Internal function that test if there is NaN in the results
         of a given analyzer"""
 
-        pipe = (self.decoder | analyzer_cls())
+        analyzer = analyzer_cls()
+        pipe = (self.decoder | analyzer)
         pipe.run()
-        for key, result in pipe.results.items():
+        for key, result in analyzer.results.items():
             if 'value' in result.data_object.keys():
                 # Test for NaN
                 self.assertFalse(np.any(np.isnan(result.data)),
index 0dc503f1f505353178388332aeb14d25ab91c832..93c45e216816c1e02e3686732c66b223c50e686f 100755 (executable)
@@ -57,30 +57,25 @@ class TestDecodingFromStack(unittest.TestCase):
                               start=self.start,
                               duration=self.duration,
                               stack=True)
-        level_on_file = Level()
-        pipe = (decoder | level_on_file)
+        level = Level()
+        pipe = (decoder | level)
 
         pipe.run()
 
         self.assertIsInstance(pipe.frames_stack, list)
 
-        results_on_file = pipe.results.get_result_by_id(
-            'level.rms').data.copy()
+        results_on_file = level.results['level.rms'].data.copy()
 
         # If the pipe is used for a second run, the processed frames stored
         # in the stack are passed to the other processors
         # without decoding the audio source again.
-        #Let's define a second analyzer equivalent to the previous one:
 
-        # Remove level_on_file from pipe
-        pipe.processors.pop()
-        level_on_stack = Level()
-        pipe |= level_on_stack
+        pipe.results = {}  # to be sure the previous results are deleted
         pipe.run()
 
         # to assert that the frames passed to the two analyzers are the same,
         # we check that the results of these analyzers are equivalent:
-        results_on_stack = pipe.results.get_result_by_id('level.rms').data
+        results_on_stack = level.results['level.rms'].data
 
         self.assertEqual(results_on_stack,
                          results_on_file)
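The same idea outside the test harness, as a hedged sketch (placeholder file name; import paths point at the modules touched by this commit):

    import numpy as np
    from timeside.analyzer.level import Level
    from timeside.decoder.file import FileDecoder

    decoder = FileDecoder('sweep.wav', stack=True)   # placeholder file name
    level = Level()
    pipe = (decoder | level)

    pipe.run()                                       # first run decodes the audio file
    rms_on_file = level.results['level.rms'].data.copy()

    pipe.results = {}                                # drop the previous results, as in the test
    pipe.run()                                       # second run replays pipe.frames_stack
    rms_on_stack = level.results['level.rms'].data

    assert np.allclose(rms_on_stack, rms_on_file)

Resetting pipe.results between runs replaces the old approach of popping the analyzer off the pipe and piping a fresh, equivalent one in, which is what the removed lines above used to do.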
index 9fd7a4c4b4d91ca42aa16bd8bfdf29a4145b43fa..f271a4893adb57843179243932b875e3e557f800 100644 (file)
@@ -80,4 +80,4 @@ class AubioMelEnergy(Analyzer):
         melenergy.parameters = dict(n_filters=self.n_filters,
                                     n_coeffs=self.n_coeffs)
         melenergy.data_object.value = self.melenergy_results
-        self.process_pipe.results.add(melenergy)
+        self.add_result(melenergy)
index 584de106e213b8bb3c11883df8a1931413bccc10..1889efd3c02101ea67139b4f4b25ccda4c522628 100644 (file)
@@ -83,4 +83,4 @@ class AubioMfcc(Analyzer):
         mfcc.parameters = dict(n_filters=self.n_filters,
                                n_coeffs=self.n_coeffs)
         mfcc.data_object.value = self.mfcc_results
-        self.process_pipe.results.add(mfcc)
+        self.add_result(mfcc)
index 378707b964ef9276619985e699d2ab1b830a71dc..120e828de2174a2720da1819486bdc7f159b0600 100644 (file)
@@ -107,7 +107,7 @@ class AubioPitch(Analyzer):
         pitch.id_metadata.name += ' ' + "pitch"
         pitch.id_metadata.unit = "Hz"
         pitch.data_object.value = self.pitches
-        self.process_pipe.results.add(pitch)
+        self.add_result(pitch)
 
         pitch_confidence = self.new_result(
             data_mode='value', time_mode='framewise')
@@ -115,4 +115,4 @@ class AubioPitch(Analyzer):
         pitch_confidence.id_metadata.name += ' ' + "pitch confidence"
         pitch_confidence.id_metadata.unit = None
         pitch_confidence.data_object.value = self.pitch_confidences
-        self.process_pipe.results.add(pitch_confidence)
+        self.add_result(pitch_confidence)
index f8eedd70e90453daa3af87552414fdee9ec3b599..cfe5d902e52c1f875accd298c96d7d4169535dfc 100644 (file)
@@ -95,4 +95,4 @@ class AubioSpecdesc(Analyzer):
             res_specdesc.id_metadata.name = ' ' + method
             res_specdesc.data_object.value = self.specdesc_results[method]
 
-            self.process_pipe.results.add(res_specdesc)
+            self.add_result(res_specdesc)
index 45426fff1d17660212ef3d169c49e64c7981408d..7b20ebdbbd848983feb87caa38777d08e313a249 100644 (file)
@@ -98,7 +98,7 @@ class AubioTemporal(Analyzer):
         onsets.data_object.label = numpy.ones(len(self.onsets))
         onsets.data_object.label_metadata.label = {1: 'Onset'}
 
-        self.process_pipe.results.add(onsets)
+        self.add_result(onsets)
 
         #---------------------------------
         #  Onset Rate: Segment (time, duration, value)
@@ -117,7 +117,7 @@ class AubioTemporal(Analyzer):
             onsetrate.data_object.value = []
             onsetrate.data_object.time = []
 
-        self.process_pipe.results.add(onsetrate)
+        self.add_result(onsetrate)
 
         #---------------------------------
         #  Beats: Event (time, "Beat")
@@ -130,7 +130,7 @@ class AubioTemporal(Analyzer):
         beats.data_object.label = numpy.ones(len(self.beats))
         beats.data_object.label_metadata.label = {1: 'Beat'}
 
-        self.process_pipe.results.add(beats)
+        self.add_result(beats)
 
         #---------------------------------
         #  Beat confidences: Event (time, value)
@@ -143,7 +143,7 @@ class AubioTemporal(Analyzer):
         beat_confidences.data_object.time = self.beats
         beat_confidences.data_object.value = self.beat_confidences
 
-        self.process_pipe.results.add(beat_confidences)
+        self.add_result(beat_confidences)
 
         #---------------------------------
         #  BPM: Segment (time, duration, value)
@@ -161,4 +161,4 @@ class AubioTemporal(Analyzer):
         else:
             bpm.data_object.value = []
 
-        self.process_pipe.results.add(bpm)
+        self.add_result(bpm)
index fe09dbd093627e2fb15d7de28bc5327b1f61efb2..3910dbb418bbe7c633e8065a4c468ea89bd965c0 100644 (file)
@@ -1130,12 +1130,14 @@ class Analyzer(Processor):
         self.result_blocksize = self.input_blocksize
         self.result_stepsize = self.input_stepsize
 
+    def add_result(self, result):
+        if not self.uuid() in self.process_pipe.results:
+            self.process_pipe.results[self.uuid()] = AnalyzerResultContainer()
+        self.process_pipe.results[self.uuid()][result.id] = result
+
     @property
     def results(self):
-        return AnalyzerResultContainer(
-            [self.process_pipe.results[key]
-             for key in self.process_pipe.results.keys()
-             if key.startswith(self.uuid())])
+        return self.process_pipe.results[self.uuid()]
 
     @staticmethod
     def id():
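Together, the new add_result() and the rewritten results property define a two-level mapping: pipe.results is keyed by analyzer uuid, and each value is an AnalyzerResultContainer keyed by result id. A short sketch, assuming a pipe that has already been run with an AubioPitch instance named `pitch`:

    container = pipe.results[pitch.uuid()]          # per-analyzer AnalyzerResultContainer
    assert pitch.results is container               # the `results` property is just this lookup
    pitch_result = container['aubio_pitch.pitch']   # individual results are keyed by result.id

This is also why the tests above can now simply index results[result_id] instead of calling get_result_by_id().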
index c7c11ff9815c92f069f77fae08d199459ed5db7b..1c523912d1a3ad93b6271d4ff666f833c851d9b4 100644 (file)
@@ -63,4 +63,4 @@ class MeanDCShift(Analyzer):
         dc_result = self.new_result(data_mode='value', time_mode='global')
         dc_result.data_object.value = numpy.round(
             numpy.mean(100 * self.values), 3)
-        self.process_pipe.results.add(dc_result)
+        self.add_result(dc_result)
index 03fd9ce5dd1b7c1a7253796fd7097c3af8847229..2b6c52b5271bb205b030d5b483261cdd7d8f5124 100644 (file)
@@ -47,7 +47,7 @@ class IRITMonopoly(Analyzer):
 
         self._aubio_pitch_analyzer = AubioPitch(blocksize_s=self.wLen,
                                                 stepsize_s=self.wStep)
-        self.parents.append(self._aubio_pitch_analyzer)
+        self.parents['aubio_pitch'] = self._aubio_pitch_analyzer
 
     @interfacedoc
     def setup(self, channels=None, samplerate=None,
@@ -87,8 +87,10 @@ class IRITMonopoly(Analyzer):
 
         '''
         aubio_res_id = 'aubio_pitch.pitch_confidence'
-        pipe_results = self.process_pipe.results
-        pitch_confidences = pipe_results.get_result_by_id(aubio_res_id).data
+        aubio_uuid = self.parents['aubio_pitch'].uuid()
+        aubio_results = self.process_pipe.results[aubio_uuid]
+
+        pitch_confidences = aubio_results[aubio_res_id].data
 
         nb_frameDecision = int(self.decisionLen / self.wStep)
         epsilon = numpy.spacing(pitch_confidences[0])
@@ -110,7 +112,7 @@ class IRITMonopoly(Analyzer):
         conf.id_metadata.name += ' ' + 'Yin Confidence'
         conf.data_object.value = pitch_confidences
 
-        self.process_pipe.results.add(conf)
+        self.add_result(conf)
 
         convert = {False: 0, True: 1}
         label = {0: 'Poly', 1: 'Mono'}
@@ -126,7 +128,7 @@ class IRITMonopoly(Analyzer):
 
         segs.data_object.duration = [(float(s[1] - s[0]+1) * self.decisionLen)
                                      for s in segList]
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
         return
 
     def monoLikelihood(self, m, v):
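The hunk above is the first instance of the new parent-lookup pattern: a dict-based `parents` plus a uuid-keyed `process_pipe.results`. A hedged fragment showing the pattern in isolation; _parent_data is a hypothetical helper, not part of this commit:

    def _parent_data(self, parent_key, result_id):
        # Hypothetical helper: fetch one result from a registered parent analyzer.
        parent = self.parents[parent_key]                   # parents is a dict now
        return self.process_pipe.results[parent.uuid()][result_id].data

    # e.g. inside IRITMonopoly.post_process():
    #     pitch_confidences = self._parent_data('aubio_pitch',
    #                                           'aubio_pitch.pitch_confidence')

The same pattern recurs below in limsi_sad.py, odf.py and render_analyzers.py.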
index 6ec93f73d56928f485c5f513b2a37c29ae00cb2c..22f54cb16d6767981720303de86e81c6f82d09d9 100644 (file)
@@ -192,7 +192,7 @@ class IRITStartSeg(Analyzer):
                                  for s in segsList]
         segs.data_object.duration = [(float(s[1] - s[0]) * step)
                                      for s in segsList]
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
     def release(self):
         self._buffer.close()
index 18ad3915d120c124e9ec2b402ce9ad248c5ca5ed..a64c4ff2d7ad222dd99ac21946b3959148e72cb2 100644 (file)
@@ -145,7 +145,7 @@ class IRITSpeech4Hz(Analyzer):
 
         modEnergy.data_object.value = conf
 
-        self.process_pipe.results.add(modEnergy)
+        self.add_result(modEnergy)
 
         # Segment
         convert = {False: 0, True: 1}
@@ -175,7 +175,7 @@ class IRITSpeech4Hz(Analyzer):
                                      self.samplerate())
                                      for s in segList]
 
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
         # Median filter on decision
         segs = self.new_result(data_mode='label', time_mode='segment')
@@ -192,7 +192,7 @@ class IRITSpeech4Hz(Analyzer):
                                      self.samplerate())
                                      for s in segList_filt]
 
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
 
 
index b33f7bfd704e342985fc0e181535b0a512231ac2..4134e5410754e761d1da0cc393528cbf9b712d13 100644 (file)
@@ -80,7 +80,7 @@ class IRITSpeechEntropy(Analyzer):
         conf.id_metadata.name += ' ' + 'Confidence'
 
         conf.data_object.value = confEntropy
-        self.process_pipe.results.add(conf)
+        self.add_result(conf)
 
         # Binary Entropy
         binaryEntropy = modulentropy > self.threshold
@@ -105,6 +105,6 @@ class IRITSpeechEntropy(Analyzer):
                                      self.samplerate())
                                      for s in segList]
 
-        self.process_pipe.results.add(segs)
+        self.add_result(segs)
 
         return
index 8136ad983334a36b079963fbb5556765a383c377..9b4cc4c776965d2e43e4ec51329261195a03f0d9 100644 (file)
@@ -79,7 +79,7 @@ class Level(Analyzer):
 
         max_level.data_object.value = np.round(
             20 * np.log10(self.max_value), 3)
-        self.process_pipe.results.add(max_level)
+        self.add_result(max_level)
 
         # RMS level
         rms_level = self.new_result(data_mode='value', time_mode='global')
@@ -92,4 +92,4 @@ class Level(Analyzer):
             rms_val = MACHINE_EPSILON
 
         rms_level.data_object.value = np.round(20 * np.log10(rms_val), 3)
-        self.process_pipe.results.add(rms_level)
+        self.add_result(rms_level)
index ed7e9243d2c67144e71ba6641c0ca730c9f1d6d4..ba18b83dfba5f1e52e06a127930adc060a894b04 100644 (file)
@@ -83,7 +83,7 @@ class LimsiSad(Analyzer):
             'mfccd2: MFCC CepsIgnoreFirstCoeff=0 blockSize=1024 stepSize=256 > Derivate DOrder=2')
         spec.addFeature('zcr: ZCR blockSize=1024 stepSize=256')
         parent_analyzer = get_processor('yaafe')(spec)
-        self.parents.append(parent_analyzer)
+        self.parents['yaafe'] = parent_analyzer
 
         # informative parameters
         # these are not really taken into account by the system
@@ -123,15 +123,11 @@ class LimsiSad(Analyzer):
         return frames, eod
 
     def post_process(self):
-        yaafe_result = self.process_pipe.results
-        mfcc = yaafe_result.get_result_by_id(
-            'yaafe.mfcc')['data_object']['value']
-        mfccd1 = yaafe_result.get_result_by_id(
-            'yaafe.mfccd1')['data_object']['value']
-        mfccd2 = yaafe_result.get_result_by_id(
-            'yaafe.mfccd2')['data_object']['value']
-        zcr = yaafe_result.get_result_by_id(
-            'yaafe.zcr')['data_object']['value']
+        yaafe_result = self.process_pipe.results[self.parents['yaafe'].uuid()]
+        mfcc = yaafe_result['yaafe.mfcc']['data_object']['value']
+        mfccd1 = yaafe_result['yaafe.mfccd1']['data_object']['value']
+        mfccd2 = yaafe_result['yaafe.mfccd2']['data_object']['value']
+        zcr = yaafe_result['yaafe.zcr']['data_object']['value']
 
         features = np.concatenate((mfcc, mfccd1, mfccd2, zcr), axis=1)
 
@@ -143,4 +139,4 @@ class LimsiSad(Analyzer):
         sad_result.id_metadata.name += ' ' + \
             'Speech Activity Detection Log Likelihood Difference'
         sad_result.data_object.value = res
-        self.process_pipe.results.add(sad_result)
+        self.add_result(sad_result)
index 1845f85dd401d42cb606aec8c09438eeb93c2a54..7d485621bca695980d7152d9efbd96fbf649d181 100644 (file)
@@ -42,8 +42,9 @@ class OnsetDetectionFunction(Analyzer):
         else:
             self.input_stepsize = blocksize / 2
 
-        self.parents.append(Spectrogram(blocksize=self.input_blocksize,
-                            stepsize=self.input_stepsize))
+        self.parents['spectrogram'] = Spectrogram(
+            blocksize=self.input_blocksize,
+            stepsize=self.input_stepsize)
 
     @interfacedoc
     def setup(self, channels=None, samplerate=None,
@@ -73,8 +74,8 @@ class OnsetDetectionFunction(Analyzer):
 
         #spectrogram = self.parents()[0]['spectrogram_analyzer'].data
         results = self.process_pipe.results
-
-        spectrogram = results.get_result_by_id('spectrogram_analyzer').data
+        parent_uuid = self.parents['spectrogram'].uuid()
+        spectrogram = results[parent_uuid]['spectrogram_analyzer'].data
         #spectrogram = self.pipe._results[self.parents()[0].id]
 
         # Low-pass filtering of the spectrogram amplitude along the time axis
@@ -108,4 +109,4 @@ class OnsetDetectionFunction(Analyzer):
         odf = self.new_result(data_mode='value', time_mode='framewise')
         #odf.parameters = {'FFT_SIZE': self.FFT_SIZE}
         odf.data_object.value = odf_diff
-        self.process_pipe.results.add(odf)
+        self.add_result(odf)
index cff857412e401db6256fd1f403d55bcf2cdbdd2e..082e40f086ebd5b556274e1d0f1704947b99bbb5 100644 (file)
@@ -36,6 +36,8 @@ class Spectrogram(Analyzer):
     # Define Parameters
     class _Param(HasTraits):
         FFT_SIZE = Int()
+        input_blocksize = Int()
+        input_stepsize = Int()
 
     def __init__(self, blocksize=2048, stepsize=None, fft_size=None):
         super(Spectrogram, self).__init__()
@@ -88,4 +90,4 @@ class Spectrogram(Analyzer):
         spectrogram.data_object.y_value = (np.arange(0, nb_freq) *
                                            self.samplerate() / self.FFT_SIZE)
 
-        self.process_pipe.results.add(spectrogram)
+        self.add_result(spectrogram)
index 2ed2af540bf9c680e4c91c59188f77b67edcd768..1b32f3545d012155c8c2e2a649a151838b6f8c52 100644 (file)
@@ -136,7 +136,7 @@ class VampSimpleHost(Analyzer):
             plugin_res.id_metadata.name += ' ' + \
                 ' '.join(plugin_line[1:])
 
-            self.process_pipe.results.add(plugin_res)
+            self.add_result(plugin_res)
 
     @staticmethod
     def vamp_plugin(plugin, wavfile):
index 713fbf72d072f20428c140fdb206bf5598c548f8..eb465c9aa97db20228767ff591b0cd3b8575ac5c 100644 (file)
@@ -68,4 +68,4 @@ class Waveform(Analyzer):
     def post_process(self):
         waveform = self.new_result(data_mode='value', time_mode='framewise')
         waveform.data_object.value = np.vstack(self.values)
-        self.process_pipe.results.add(waveform)
+        self.add_result(waveform)
index 5106990cd2925f22d60d53da1248a909c08942ba..5367b72bcf5094857a6951742c8e828133a630a8 100644 (file)
@@ -120,4 +120,4 @@ class Yaafe(Analyzer):
 
             # Store results in Container
             if len(result.data_object.value):
-                self.process_pipe.results.add(result)
+                self.add_result(result)
index 6b43135e44966e7a49dc1582dcf4bc83d74a0e71..9f2e4dac58b3e6296925c20f769ecbb5342c524b 100644 (file)
@@ -75,7 +75,7 @@ class Processor(Component, HasParam):
 
 
     Attributes:
-              parents :  List of parent Processors that must be processed
+              parents :  Dictionary of parent Processors that must be processed
                          before the current Processor
               pipe :     The ProcessPipe in which the Processor will run
         """
@@ -89,7 +89,7 @@ class Processor(Component, HasParam):
     def __init__(self):
         super(Processor, self).__init__()
 
-        self.parents = []
+        self.parents = {}
         self.source_mediainfo = None
         self.process_pipe = None
         self.UUID = uuid.uuid4()
@@ -175,7 +175,7 @@ class Processor(Component, HasParam):
                 self.get_parameters() == other.get_parameters())
 
     def __repr__(self):
-        return self.id() + '\n' + self.get_parameters()
+        return '-'.join([self.id(), self.get_parameters()])
 
 
 class FixedSizeInputAdapter(object):
@@ -275,7 +275,8 @@ class ProcessPipe(object):
 
     Attributes:
         processor: List of all processors in the Process pipe
-        results : Results Container for all the analyzers of the Pipe process
+        results : Dictionary of Result Containers from all the analyzers
+                  in the Pipe process
 """
 
     def __init__(self, *others):
@@ -287,8 +288,7 @@ class ProcessPipe(object):
 
         self |= others
 
-        from timeside.analyzer.core import AnalyzerResultContainer
-        self.results = AnalyzerResultContainer()
+        self.results = {}
 
     def append_processor(self, proc, source_proc=None):
         "Append a new processor to the pipe"
@@ -319,7 +319,7 @@ class ProcessPipe(object):
                                      type='audio_source')
             proc.process_pipe = self
             # Add an edge between each parent and proc
-            for parent in proc.parents:
+            for parent in proc.parents.values():
                     self._graph.add_edge(parent.uuid(), proc.uuid(),
                                          type='data_source')
 
@@ -363,7 +363,7 @@ class ProcessPipe(object):
 
     def __ior__(self, other):
         if isinstance(other, Processor):
-            for parent in other.parents:
+            for parent in other.parents.values():
                 self |= parent
             self.append_processor(other)
 
index 5362158472d98f31af7ed15e679910afca433538..9adb1e4227205eeff304370f0c0500cd4aa23923 100644 (file)
@@ -54,12 +54,14 @@ class DisplayAnalyzer(Grapher):
     @interfacedoc
     def post_process(self):
         pipe_result = self.process_pipe.results
-        parent_result = pipe_result.get_result_by_id(self._result_id)
+        parent_uuid = self.parents['analyzer'].uuid()
+        parent_result = pipe_result[parent_uuid][self._result_id]
 
         fg_image = parent_result._render_PIL((self.image_width,
                                               self.image_height), self.dpi)
         if self._background:
-            bg_result = pipe_result.get_result_by_id(self._bg_id)
+            bg_uuid = self.parents['bg_analyzer'].uuid()
+            bg_result = pipe_result[bg_uuid][self._bg_id]
             bg_image = bg_result._render_PIL((self.image_width,
                                               self.image_height), self.dpi)
             # convert image to grayscale
@@ -93,18 +95,20 @@ class DisplayAnalyzer(Grapher):
                     self._background = True
                     bg_analyzer = get_processor('waveform_analyzer')()
                     self._bg_id = bg_analyzer.id()
-                    self.parents.append(bg_analyzer)
+                    self.parents['bg_analyzer'] = bg_analyzer
                 elif background == 'spectrogram':
                     self._background = True
                     bg_analyzer = get_processor('spectrogram_analyzer')()
                     self._bg_id = bg_analyzer.id()
-                    self.parents.append(bg_analyzer)
+                    self.parents['bg_analyzer'] = bg_analyzer
 
                 else:
                     self._background = None
 
-                self.parents.append(analyzer(**analyzer_parameters))
+                parent_analyzer = analyzer(**analyzer_parameters)
+                self.parents['analyzer'] = parent_analyzer
                 # TODO : make it generic when analyzer will be "atomize"
+                self._parent_uuid =  parent_analyzer.uuid()
                 self._result_id = result_id
 
             @staticmethod