From c19561457bca11add5949537bbefe4c0b2fa0f0b Mon Sep 17 00:00:00 2001 From: Thomas Fillon Date: Tue, 16 Sep 2014 16:14:57 +0200 Subject: [PATCH] chore(analyzerResults): Results are now stored in pipe.results as a dictionary of AnalyzerResults --- doc/source/tutorial/frames_stack.rst | 31 +++---------------- tests/test_analyzer_dc.py | 2 +- tests/test_analyzer_level.py | 2 +- tests/test_analyzers_default.py | 5 +-- tests/test_analyzers_stress.py | 5 +-- tests/test_decoding_stack.py | 15 +++------ timeside/analyzer/aubio/aubio_melenergy.py | 2 +- timeside/analyzer/aubio/aubio_mfcc.py | 2 +- timeside/analyzer/aubio/aubio_pitch.py | 4 +-- timeside/analyzer/aubio/aubio_specdesc.py | 2 +- timeside/analyzer/aubio/aubio_temporal.py | 10 +++--- timeside/analyzer/core.py | 10 +++--- timeside/analyzer/dc.py | 2 +- timeside/analyzer/irit_monopoly.py | 12 ++++--- timeside/analyzer/irit_noise_startSilences.py | 2 +- timeside/analyzer/irit_speech_4hz.py | 6 ++-- timeside/analyzer/irit_speech_entropy.py | 4 +-- timeside/analyzer/level.py | 4 +-- timeside/analyzer/limsi_sad.py | 18 +++++------ timeside/analyzer/odf.py | 11 ++++--- timeside/analyzer/spectrogram.py | 4 ++- timeside/analyzer/vamp_plugin.py | 2 +- timeside/analyzer/waveform.py | 2 +- timeside/analyzer/yaafe.py | 2 +- timeside/core.py | 16 +++++----- timeside/grapher/render_analyzers.py | 14 ++++++--- 26 files changed, 86 insertions(+), 103 deletions(-) diff --git a/doc/source/tutorial/frames_stack.rst b/doc/source/tutorial/frames_stack.rst index c89d49c..b14002f 100644 --- a/doc/source/tutorial/frames_stack.rst +++ b/doc/source/tutorial/frames_stack.rst @@ -20,16 +20,15 @@ Then let's setup a :class:`FileDecoder ` with Setup an arbitrary analyzer to check that decoding process from file and from stack are equivalent: ->>> pitch_on_file = get_processor('aubio_pitch')() ->>> pipe = (decoder | pitch_on_file) +>>> pitch = get_processor('aubio_pitch')() +>>> pipe = (decoder | pitch) >>> print pipe.processors #doctest: +ELLIPSIS -[, ] +[gst_dec-{}, aubio_pitch-{}] -After the pipe has been run, the other processes of the pipe are removed from the pipe and only the :class:`FileDecoder ` is kept : + +Run the pipe: >>> pipe.run() ->>> print pipe.processors #doctest: +ELLIPSIS -[] The processed frames are stored in the pipe attribute `frames_stack` as a list of frames : @@ -47,23 +46,3 @@ Last frame : (array([[...]], dtype=float32), True) If the pipe is used for a second run, the processed frames stored in the stack are passed to the other processors without decoding the audio source again. -Let's define a second analyzer equivalent to the previous one: - ->>> pitch_on_stack = get_processor('aubio_pitch')() - -Add it to the pipe: - ->>> pipe |= pitch_on_stack ->>> print pipe.processors #doctest: +ELLIPSIS -[, ] - -And run the pipe: - ->>> pipe.run() - -Assert that the frames passed to the two analyzers are the same, we check that the results of these analyzers are equivalent: - ->>> np.allclose(pitch_on_file.results.get_result_by_id('aubio_pitch.pitch').data, -... 
pitch_on_stack.results.get_result_by_id('aubio_pitch.pitch').data) -True - diff --git a/tests/test_analyzer_dc.py b/tests/test_analyzer_dc.py index 7c49e89..c765226 100755 --- a/tests/test_analyzer_dc.py +++ b/tests/test_analyzer_dc.py @@ -29,7 +29,7 @@ class TestAnalyzerDC(unittest.TestCase): (decoder | self.analyzer).run() results = self.analyzer.results for result_id in self.expected.keys(): - result = results.get_result_by_id(result_id) + result = results[result_id] self.assertEquals(result.data_object.value, self.expected[result_id]) diff --git a/tests/test_analyzer_level.py b/tests/test_analyzer_level.py index 9bc56e1..510ef6c 100755 --- a/tests/test_analyzer_level.py +++ b/tests/test_analyzer_level.py @@ -38,7 +38,7 @@ class TestAnalyzerLevel(unittest.TestCase): (decoder | self.analyzer).run() results = self.analyzer.results for result_id in self.expected.keys(): - result = results.get_result_by_id(result_id) + result = results[result_id] self.assertEquals(result.data_object.value, self.expected[result_id]) #print results diff --git a/tests/test_analyzers_default.py b/tests/test_analyzers_default.py index 98455cf..12434c6 100755 --- a/tests/test_analyzers_default.py +++ b/tests/test_analyzers_default.py @@ -23,9 +23,10 @@ class TestAnalyzers_with_default(unittest.TestCase): """Internal function that test if there is NaN in the results of a given analyzer""" - pipe = (self.decoder | analyzer_cls()) + analyzer = analyzer_cls() + pipe = (self.decoder | analyzer) pipe.run() - for key, result in pipe.results.items(): + for key, result in analyzer.results.items(): if 'value' in result.data_object.keys(): # Test for NaN self.assertFalse(np.any(np.isnan(result.data)), diff --git a/tests/test_analyzers_stress.py b/tests/test_analyzers_stress.py index 3bb43be..9669643 100755 --- a/tests/test_analyzers_stress.py +++ b/tests/test_analyzers_stress.py @@ -23,9 +23,10 @@ class TestAnalyzers_with_zeros(unittest.TestCase): """Internal function that test if there is NaN in the results of a given analyzer""" - pipe = (self.decoder | analyzer_cls()) + analyzer = analyzer_cls() + pipe = (self.decoder | analyzer) pipe.run() - for key, result in pipe.results.items(): + for key, result in analyzer.results.items(): if 'value' in result.data_object.keys(): # Test for NaN self.assertFalse(np.any(np.isnan(result.data)), diff --git a/tests/test_decoding_stack.py b/tests/test_decoding_stack.py index 0dc503f..93c45e2 100755 --- a/tests/test_decoding_stack.py +++ b/tests/test_decoding_stack.py @@ -57,30 +57,25 @@ class TestDecodingFromStack(unittest.TestCase): start=self.start, duration=self.duration, stack=True) - level_on_file = Level() - pipe = (decoder | level_on_file) + level = Level() + pipe = (decoder | level) pipe.run() self.assertIsInstance(pipe.frames_stack, list) - results_on_file = pipe.results.get_result_by_id( - 'level.rms').data.copy() + results_on_file = level.results['level.rms'].data.copy() # If the pipe is used for a second run, the processed frames stored # in the stack are passed to the other processors # without decoding the audio source again. 
- #Let's define a second analyzer equivalent to the previous one: - # Remove level_on_file from pipe - pipe.processors.pop() - level_on_stack = Level() - pipe |= level_on_stack + pipe.results = {} # to be sure the previous results are deleted pipe.run() # to assert that the frames passed to the two analyzers are the same, # we check that the results of these analyzers are equivalent: - results_on_stack = pipe.results.get_result_by_id('level.rms').data + results_on_stack = level.results['level.rms'].data self.assertEqual(results_on_stack, results_on_file) diff --git a/timeside/analyzer/aubio/aubio_melenergy.py b/timeside/analyzer/aubio/aubio_melenergy.py index 9fd7a4c..f271a48 100644 --- a/timeside/analyzer/aubio/aubio_melenergy.py +++ b/timeside/analyzer/aubio/aubio_melenergy.py @@ -80,4 +80,4 @@ class AubioMelEnergy(Analyzer): melenergy.parameters = dict(n_filters=self.n_filters, n_coeffs=self.n_coeffs) melenergy.data_object.value = self.melenergy_results - self.process_pipe.results.add(melenergy) + self.add_result(melenergy) diff --git a/timeside/analyzer/aubio/aubio_mfcc.py b/timeside/analyzer/aubio/aubio_mfcc.py index 584de10..1889efd 100644 --- a/timeside/analyzer/aubio/aubio_mfcc.py +++ b/timeside/analyzer/aubio/aubio_mfcc.py @@ -83,4 +83,4 @@ class AubioMfcc(Analyzer): mfcc.parameters = dict(n_filters=self.n_filters, n_coeffs=self.n_coeffs) mfcc.data_object.value = self.mfcc_results - self.process_pipe.results.add(mfcc) + self.add_result(mfcc) diff --git a/timeside/analyzer/aubio/aubio_pitch.py b/timeside/analyzer/aubio/aubio_pitch.py index 378707b..120e828 100644 --- a/timeside/analyzer/aubio/aubio_pitch.py +++ b/timeside/analyzer/aubio/aubio_pitch.py @@ -107,7 +107,7 @@ class AubioPitch(Analyzer): pitch.id_metadata.name += ' ' + "pitch" pitch.id_metadata.unit = "Hz" pitch.data_object.value = self.pitches - self.process_pipe.results.add(pitch) + self.add_result(pitch) pitch_confidence = self.new_result( data_mode='value', time_mode='framewise') @@ -115,4 +115,4 @@ class AubioPitch(Analyzer): pitch_confidence.id_metadata.name += ' ' + "pitch confidence" pitch_confidence.id_metadata.unit = None pitch_confidence.data_object.value = self.pitch_confidences - self.process_pipe.results.add(pitch_confidence) + self.add_result(pitch_confidence) diff --git a/timeside/analyzer/aubio/aubio_specdesc.py b/timeside/analyzer/aubio/aubio_specdesc.py index f8eedd7..cfe5d90 100644 --- a/timeside/analyzer/aubio/aubio_specdesc.py +++ b/timeside/analyzer/aubio/aubio_specdesc.py @@ -95,4 +95,4 @@ class AubioSpecdesc(Analyzer): res_specdesc.id_metadata.name = ' ' + method res_specdesc.data_object.value = self.specdesc_results[method] - self.process_pipe.results.add(res_specdesc) + self.add_result(res_specdesc) diff --git a/timeside/analyzer/aubio/aubio_temporal.py b/timeside/analyzer/aubio/aubio_temporal.py index 45426ff..7b20ebd 100644 --- a/timeside/analyzer/aubio/aubio_temporal.py +++ b/timeside/analyzer/aubio/aubio_temporal.py @@ -98,7 +98,7 @@ class AubioTemporal(Analyzer): onsets.data_object.label = numpy.ones(len(self.onsets)) onsets.data_object.label_metadata.label = {1: 'Onset'} - self.process_pipe.results.add(onsets) + self.add_result(onsets) #--------------------------------- # Onset Rate: Segment (time, duration, value) @@ -117,7 +117,7 @@ class AubioTemporal(Analyzer): onsetrate.data_object.value = [] onsetrate.data_object.time = [] - self.process_pipe.results.add(onsetrate) + self.add_result(onsetrate) #--------------------------------- # Beats: Event (time, "Beat") @@ -130,7 +130,7 @@ class 
AubioTemporal(Analyzer): beats.data_object.label = numpy.ones(len(self.beats)) beats.data_object.label_metadata.label = {1: 'Beat'} - self.process_pipe.results.add(beats) + self.add_result(beats) #--------------------------------- # Beat confidences: Event (time, value) @@ -143,7 +143,7 @@ class AubioTemporal(Analyzer): beat_confidences.data_object.time = self.beats beat_confidences.data_object.value = self.beat_confidences - self.process_pipe.results.add(beat_confidences) + self.add_result(beat_confidences) #--------------------------------- # BPM: Segment (time, duration, value) @@ -161,4 +161,4 @@ class AubioTemporal(Analyzer): else: bpm.data_object.value = [] - self.process_pipe.results.add(bpm) + self.add_result(bpm) diff --git a/timeside/analyzer/core.py b/timeside/analyzer/core.py index fe09dbd..3910dbb 100644 --- a/timeside/analyzer/core.py +++ b/timeside/analyzer/core.py @@ -1130,12 +1130,14 @@ class Analyzer(Processor): self.result_blocksize = self.input_blocksize self.result_stepsize = self.input_stepsize + def add_result(self, result): + if not self.uuid() in self.process_pipe.results: + self.process_pipe.results[self.uuid()] = AnalyzerResultContainer() + self.process_pipe.results[self.uuid()][result.id] = result + @property def results(self): - return AnalyzerResultContainer( - [self.process_pipe.results[key] - for key in self.process_pipe.results.keys() - if key.startswith(self.uuid())]) + return self.process_pipe.results[self.uuid()] @staticmethod def id(): diff --git a/timeside/analyzer/dc.py b/timeside/analyzer/dc.py index c7c11ff..1c52391 100644 --- a/timeside/analyzer/dc.py +++ b/timeside/analyzer/dc.py @@ -63,4 +63,4 @@ class MeanDCShift(Analyzer): dc_result = self.new_result(data_mode='value', time_mode='global') dc_result.data_object.value = numpy.round( numpy.mean(100 * self.values), 3) - self.process_pipe.results.add(dc_result) + self.add_result(dc_result) diff --git a/timeside/analyzer/irit_monopoly.py b/timeside/analyzer/irit_monopoly.py index 03fd9ce..2b6c52b 100644 --- a/timeside/analyzer/irit_monopoly.py +++ b/timeside/analyzer/irit_monopoly.py @@ -47,7 +47,7 @@ class IRITMonopoly(Analyzer): self._aubio_pitch_analyzer = AubioPitch(blocksize_s=self.wLen, stepsize_s=self.wStep) - self.parents.append(self._aubio_pitch_analyzer) + self.parents['aubio_pitch'] = self._aubio_pitch_analyzer @interfacedoc def setup(self, channels=None, samplerate=None, @@ -87,8 +87,10 @@ class IRITMonopoly(Analyzer): ''' aubio_res_id = 'aubio_pitch.pitch_confidence' - pipe_results = self.process_pipe.results - pitch_confidences = pipe_results.get_result_by_id(aubio_res_id).data + aubio_uuid = self.parents['aubio_pitch'].uuid() + aubio_results = self.process_pipe.results[aubio_uuid] + + pitch_confidences = aubio_results[aubio_res_id].data nb_frameDecision = int(self.decisionLen / self.wStep) epsilon = numpy.spacing(pitch_confidences[0]) @@ -110,7 +112,7 @@ class IRITMonopoly(Analyzer): conf.id_metadata.name += ' ' + 'Yin Confidence' conf.data_object.value = pitch_confidences - self.process_pipe.results.add(conf) + self.add_result(conf) convert = {False: 0, True: 1} label = {0: 'Poly', 1: 'Mono'} @@ -126,7 +128,7 @@ class IRITMonopoly(Analyzer): segs.data_object.duration = [(float(s[1] - s[0]+1) * self.decisionLen) for s in segList] - self.process_pipe.results.add(segs) + self.add_result(segs) return def monoLikelihood(self, m, v): diff --git a/timeside/analyzer/irit_noise_startSilences.py b/timeside/analyzer/irit_noise_startSilences.py index 6ec93f7..22f54cb 100644 --- 
a/timeside/analyzer/irit_noise_startSilences.py +++ b/timeside/analyzer/irit_noise_startSilences.py @@ -192,7 +192,7 @@ class IRITStartSeg(Analyzer): for s in segsList] segs.data_object.duration = [(float(s[1] - s[0]) * step) for s in segsList] - self.process_pipe.results.add(segs) + self.add_result(segs) def release(self): self._buffer.close() diff --git a/timeside/analyzer/irit_speech_4hz.py b/timeside/analyzer/irit_speech_4hz.py index 18ad391..a64c4ff 100644 --- a/timeside/analyzer/irit_speech_4hz.py +++ b/timeside/analyzer/irit_speech_4hz.py @@ -145,7 +145,7 @@ class IRITSpeech4Hz(Analyzer): modEnergy.data_object.value = conf - self.process_pipe.results.add(modEnergy) + self.add_result(modEnergy) # Segment convert = {False: 0, True: 1} @@ -175,7 +175,7 @@ class IRITSpeech4Hz(Analyzer): self.samplerate()) for s in segList] - self.process_pipe.results.add(segs) + self.add_result(segs) # Median filter on decision segs = self.new_result(data_mode='label', time_mode='segment') @@ -192,7 +192,7 @@ class IRITSpeech4Hz(Analyzer): self.samplerate()) for s in segList_filt] - self.process_pipe.results.add(segs) + self.add_result(segs) diff --git a/timeside/analyzer/irit_speech_entropy.py b/timeside/analyzer/irit_speech_entropy.py index b33f7bf..4134e54 100644 --- a/timeside/analyzer/irit_speech_entropy.py +++ b/timeside/analyzer/irit_speech_entropy.py @@ -80,7 +80,7 @@ class IRITSpeechEntropy(Analyzer): conf.id_metadata.name += ' ' + 'Confidence' conf.data_object.value = confEntropy - self.process_pipe.results.add(conf) + self.add_result(conf) # Binary Entropy binaryEntropy = modulentropy > self.threshold @@ -105,6 +105,6 @@ class IRITSpeechEntropy(Analyzer): self.samplerate()) for s in segList] - self.process_pipe.results.add(segs) + self.add_result(segs) return diff --git a/timeside/analyzer/level.py b/timeside/analyzer/level.py index 8136ad9..9b4cc4c 100644 --- a/timeside/analyzer/level.py +++ b/timeside/analyzer/level.py @@ -79,7 +79,7 @@ class Level(Analyzer): max_level.data_object.value = np.round( 20 * np.log10(self.max_value), 3) - self.process_pipe.results.add(max_level) + self.add_result(max_level) # RMS level rms_level = self.new_result(data_mode='value', time_mode='global') @@ -92,4 +92,4 @@ class Level(Analyzer): rms_val = MACHINE_EPSILON rms_level.data_object.value = np.round(20 * np.log10(rms_val), 3) - self.process_pipe.results.add(rms_level) + self.add_result(rms_level) diff --git a/timeside/analyzer/limsi_sad.py b/timeside/analyzer/limsi_sad.py index ed7e924..ba18b83 100644 --- a/timeside/analyzer/limsi_sad.py +++ b/timeside/analyzer/limsi_sad.py @@ -83,7 +83,7 @@ class LimsiSad(Analyzer): 'mfccd2: MFCC CepsIgnoreFirstCoeff=0 blockSize=1024 stepSize=256 > Derivate DOrder=2') spec.addFeature('zcr: ZCR blockSize=1024 stepSize=256') parent_analyzer = get_processor('yaafe')(spec) - self.parents.append(parent_analyzer) + self.parents['yaafe'] = parent_analyzer # informative parameters # these are not really taken into account by the system @@ -123,15 +123,11 @@ class LimsiSad(Analyzer): return frames, eod def post_process(self): - yaafe_result = self.process_pipe.results - mfcc = yaafe_result.get_result_by_id( - 'yaafe.mfcc')['data_object']['value'] - mfccd1 = yaafe_result.get_result_by_id( - 'yaafe.mfccd1')['data_object']['value'] - mfccd2 = yaafe_result.get_result_by_id( - 'yaafe.mfccd2')['data_object']['value'] - zcr = yaafe_result.get_result_by_id( - 'yaafe.zcr')['data_object']['value'] + yaafe_result = self.process_pipe.results[self.parents['yaafe'].uuid()] + mfcc = 
yaafe_result['yaafe.mfcc']['data_object']['value'] + mfccd1 = yaafe_result['yaafe.mfccd1']['data_object']['value'] + mfccd2 = yaafe_result['yaafe.mfccd2']['data_object']['value'] + zcr = yaafe_result['yaafe.zcr']['data_object']['value'] features = np.concatenate((mfcc, mfccd1, mfccd2, zcr), axis=1) @@ -143,4 +139,4 @@ class LimsiSad(Analyzer): sad_result.id_metadata.name += ' ' + \ 'Speech Activity Detection Log Likelihood Difference' sad_result.data_object.value = res - self.process_pipe.results.add(sad_result) + self.add_result(sad_result) diff --git a/timeside/analyzer/odf.py b/timeside/analyzer/odf.py index 1845f85..7d48562 100644 --- a/timeside/analyzer/odf.py +++ b/timeside/analyzer/odf.py @@ -42,8 +42,9 @@ class OnsetDetectionFunction(Analyzer): else: self.input_stepsize = blocksize / 2 - self.parents.append(Spectrogram(blocksize=self.input_blocksize, - stepsize=self.input_stepsize)) + self.parents['spectrogram'] = Spectrogram( + blocksize=self.input_blocksize, + stepsize=self.input_stepsize) @interfacedoc def setup(self, channels=None, samplerate=None, @@ -73,8 +74,8 @@ class OnsetDetectionFunction(Analyzer): #spectrogram = self.parents()[0]['spectrogram_analyzer'].data results = self.process_pipe.results - - spectrogram = results.get_result_by_id('spectrogram_analyzer').data + parent_uuid = self.parents['spectrogram'].uuid() + spectrogram = results[parent_uuid]['spectrogram_analyzer'].data #spectrogram = self.pipe._results[self.parents()[0].id] # Low-pass filtering of the spectrogram amplitude along the time axis @@ -108,4 +109,4 @@ class OnsetDetectionFunction(Analyzer): odf = self.new_result(data_mode='value', time_mode='framewise') #odf.parameters = {'FFT_SIZE': self.FFT_SIZE} odf.data_object.value = odf_diff - self.process_pipe.results.add(odf) + self.add_result(odf) diff --git a/timeside/analyzer/spectrogram.py b/timeside/analyzer/spectrogram.py index cff8574..082e40f 100644 --- a/timeside/analyzer/spectrogram.py +++ b/timeside/analyzer/spectrogram.py @@ -36,6 +36,8 @@ class Spectrogram(Analyzer): # Define Parameters class _Param(HasTraits): FFT_SIZE = Int() + input_blocksize = Int() + input_stepsize = Int() def __init__(self, blocksize=2048, stepsize=None, fft_size=None): super(Spectrogram, self).__init__() @@ -88,4 +90,4 @@ class Spectrogram(Analyzer): spectrogram.data_object.y_value = (np.arange(0, nb_freq) * self.samplerate() / self.FFT_SIZE) - self.process_pipe.results.add(spectrogram) + self.add_result(spectrogram) diff --git a/timeside/analyzer/vamp_plugin.py b/timeside/analyzer/vamp_plugin.py index 2ed2af5..1b32f35 100644 --- a/timeside/analyzer/vamp_plugin.py +++ b/timeside/analyzer/vamp_plugin.py @@ -136,7 +136,7 @@ class VampSimpleHost(Analyzer): plugin_res.id_metadata.name += ' ' + \ ' '.join(plugin_line[1:]) - self.process_pipe.results.add(plugin_res) + self.add_result(plugin_res) @staticmethod def vamp_plugin(plugin, wavfile): diff --git a/timeside/analyzer/waveform.py b/timeside/analyzer/waveform.py index 713fbf7..eb465c9 100644 --- a/timeside/analyzer/waveform.py +++ b/timeside/analyzer/waveform.py @@ -68,4 +68,4 @@ class Waveform(Analyzer): def post_process(self): waveform = self.new_result(data_mode='value', time_mode='framewise') waveform.data_object.value = np.vstack(self.values) - self.process_pipe.results.add(waveform) + self.add_result(waveform) diff --git a/timeside/analyzer/yaafe.py b/timeside/analyzer/yaafe.py index 5106990..5367b72 100644 --- a/timeside/analyzer/yaafe.py +++ b/timeside/analyzer/yaafe.py @@ -120,4 +120,4 @@ class Yaafe(Analyzer): # 
Store results in Container if len(result.data_object.value): - self.process_pipe.results.add(result) + self.add_result(result) diff --git a/timeside/core.py b/timeside/core.py index 6b43135..9f2e4da 100644 --- a/timeside/core.py +++ b/timeside/core.py @@ -75,7 +75,7 @@ class Processor(Component, HasParam): Attributes: - parents : List of parent Processors that must be processed + parents : Dictionary of parent Processors that must be processed before the current Processor pipe : The ProcessPipe in which the Processor will run """ @@ -89,7 +89,7 @@ class Processor(Component, HasParam): def __init__(self): super(Processor, self).__init__() - self.parents = [] + self.parents = {} self.source_mediainfo = None self.process_pipe = None self.UUID = uuid.uuid4() @@ -175,7 +175,7 @@ class Processor(Component, HasParam): self.get_parameters() == other.get_parameters()) def __repr__(self): - return self.id() + '\n' + self.get_parameters() + return '-'.join([self.id(), self.get_parameters()]) class FixedSizeInputAdapter(object): @@ -275,7 +275,8 @@ class ProcessPipe(object): Attributes: processor: List of all processors in the Process pipe - results : Results Container for all the analyzers of the Pipe process + results : Dictionary of Results Containers from all the analyzers + in the Pipe process """ def __init__(self, *others): @@ -287,8 +288,7 @@ class ProcessPipe(object): self |= others - from timeside.analyzer.core import AnalyzerResultContainer - self.results = AnalyzerResultContainer() + self.results = {} def append_processor(self, proc, source_proc=None): "Append a new processor to the pipe" @@ -319,7 +319,7 @@ class ProcessPipe(object): type='audio_source') proc.process_pipe = self # Add an edge between each parent and proc - for parent in proc.parents: + for parent in proc.parents.values(): self._graph.add_edge(parent.uuid(), proc.uuid(), type='data_source') @@ -363,7 +363,7 @@ class ProcessPipe(object): def __ior__(self, other): if isinstance(other, Processor): - for parent in other.parents: + for parent in other.parents.values(): self |= parent self.append_processor(other) diff --git a/timeside/grapher/render_analyzers.py b/timeside/grapher/render_analyzers.py index 5362158..9adb1e4 100644 --- a/timeside/grapher/render_analyzers.py +++ b/timeside/grapher/render_analyzers.py @@ -54,12 +54,14 @@ class DisplayAnalyzer(Grapher): @interfacedoc def post_process(self): pipe_result = self.process_pipe.results - parent_result = pipe_result.get_result_by_id(self._result_id) + parent_uuid = self.parents['analyzer'].uuid() + parent_result = pipe_result[parent_uuid][self._result_id] fg_image = parent_result._render_PIL((self.image_width, self.image_height), self.dpi) if self._background: - bg_result = pipe_result.get_result_by_id(self._bg_id) + bg_uuid = self.parents['bg_analyzer'].uuid() + bg_result = pipe_result[bg_uuid][self._bg_id] bg_image = bg_result._render_PIL((self.image_width, self.image_height), self.dpi) # convert image to grayscale @@ -93,18 +95,20 @@ class DisplayAnalyzer(Grapher): self._background = True bg_analyzer = get_processor('waveform_analyzer')() self._bg_id = bg_analyzer.id() - self.parents.append(bg_analyzer) + self.parents['bg_analyzer'] = bg_analyzer elif background == 'spectrogram': self._background = True bg_analyzer = get_processor('spectrogram_analyzer')() self._bg_id = bg_analyzer.id() - self.parents.append(bg_analyzer) + self.parents['bg_analyzer'] = bg_analyzer else: self._background = None - self.parents.append(analyzer(**analyzer_parameters)) + parent_analyzer = 
analyzer(**analyzer_parameters) + self.parents['analyzer'] = parent_analyzer # TODO : make it generic when analyzer will be "atomize" + self._parent_uuid = parent_analyzer.uuid() self._result_id = result_id @staticmethod -- 2.39.5
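
For reference, a short usage sketch of the results layout this patch introduces: ProcessPipe.results is now a plain dictionary keyed by each analyzer's uuid(), every value is an AnalyzerResultContainer indexed by result id, and Analyzer.results returns the container belonging to that analyzer. This sketch is not part of the commit: the audio path is a placeholder, the 'gst_dec' decoder id and its uri argument are taken from the tutorial doctest above, and the other ids ('aubio_pitch', 'aubio_pitch.pitch', 'level.rms') are the ones used in the patched tests, with Level imported from the timeside/analyzer/level.py module touched by the patch.

>>> from timeside.core import get_processor
>>> from timeside.analyzer.level import Level
>>> decoder = get_processor('gst_dec')(uri='/path/to/audio.wav')
>>> pitch = get_processor('aubio_pitch')()
>>> level = Level()
>>> pipe = (decoder | pitch | level)
>>> pipe.run()
>>> # one entry per analyzer, keyed by its uuid()
>>> sorted(pipe.results.keys()) == sorted([pitch.uuid(), level.uuid()])
True
>>> # each entry is an AnalyzerResultContainer indexed by result id
>>> rms = level.results['level.rms'].data
>>> pitches = pipe.results[pitch.uuid()]['aubio_pitch.pitch'].data
>>> # Analyzer.results is only a view on the per-uuid entry of the pipe
>>> pitch.results is pipe.results[pitch.uuid()]
True

As in tests/test_decoding_stack.py above, a pipe that is rerun from its frames stack can clear the previous run first with `pipe.results = {}` before calling run() again.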
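
Seen from the analyzer side, the change boils down to three conventions: parents is now a dictionary keyed by a name the child processor chooses, a parent's container is looked up through process_pipe.results[parent.uuid()], and an analyzer stores its own results with self.add_result(). The following minimal sketch of a hypothetical analyzer is written against this API; the class name, the 'mean_pitch' id and the import layout are illustrative only, mirroring the analyzer modules touched by the patch, and are not part of the commit:

    import numpy as np

    from timeside.core import implements, interfacedoc, get_processor
    from timeside.analyzer.core import Analyzer
    from timeside.api import IAnalyzer


    class MeanPitch(Analyzer):
        """Hypothetical analyzer: mean of the pitch track computed by aubio_pitch."""
        implements(IAnalyzer)

        def __init__(self):
            super(MeanPitch, self).__init__()
            # parents is a dict now: register the parent under an explicit key
            self.parents['aubio_pitch'] = get_processor('aubio_pitch')()

        @staticmethod
        @interfacedoc
        def id():
            return "mean_pitch"

        @staticmethod
        @interfacedoc
        def name():
            return "Mean pitch (illustrative)"

        @staticmethod
        @interfacedoc
        def unit():
            return "Hz"

        def process(self, frames, eod=False):
            # Nothing to do per block; the parent analyzer does the frame-level work
            return frames, eod

        def post_process(self):
            # Fetch the parent's container by uuid, then the result by its id
            parent = self.parents['aubio_pitch']
            pitches = self.process_pipe.results[parent.uuid()]['aubio_pitch.pitch'].data

            result = self.new_result(data_mode='value', time_mode='global')
            result.data_object.value = np.round(np.mean(pitches), 3)
            # add_result() replaces the former process_pipe.results.add()
            self.add_result(result)

This mirrors what irit_monopoly.py and limsi_sad.py do above with their 'aubio_pitch' and 'yaafe' parents.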