- The frames can be stored in the `ProcessPipe` frames_stack attribute if the pipe is run with argument 'stack=True' (default stack=False)
- The ProcessPipe is now linked to its processors through the processor's self.process_pipe attribute (this avoids confusion with `decoder.pipe` or `decoder.pipeline`) -> reflect this change in all analyzers
- After the process, during the release, every processor of the pipe is removed from the pipe (except the decoder)
+++ /dev/null
-.. This file is part of TimeSide
- @author: Thomas Fillon
-
-===============================================
- Running a pipe with previously decoded frames
-===============================================
-
-Example of use of the :class:`ArrayDecoder <timeside.decoder.core.ArrayDecoder>` and :class:`Waveform analyzer <timeside.analyzer.waveform.Waveform>` to run a pipe with previously frames from memory on a second pass
-
-First, setup a :class:`FileDecoder <timeside.decoder.core.FileDecoder>` on an audio file:
-
->>> import timeside
->>> import numpy as np
->>>
->>> audio_file = 'http://github.com/yomguy/timeside-samples/raw/master/samples/sweep.mp3'
->>>
->>> file_decoder = timeside.decoder.FileDecoder(audio_file)
-
-Then, setup an arbitrary analyzer to check that both decoding process are equivalent and a :class:`Waveform analyzer <timeside.analyzer.waveform.Waveform>` which result will store the decoded frames:
-
->>> pitch_on_file = timeside.analyzer.AubioPitch()
->>> waveform = timeside.analyzer.Waveform()
-
-And run the pipe:
-
->>> (file_decoder | pitch_on_file | waveform).run()
-
-To run the second pass, we need to get back the decoded samples and the original samplerate and pass them to :class:`ArrayDecoder <timeside.decoder.core.ArrayDecoder>`:
-
->>> samples = waveform.results['waveform_analyzer'].data
->>> samplerate = waveform.results['waveform_analyzer'].frame_metadata.samplerate
->>> array_decoder = timeside.decoder.ArrayDecoder(samples=samples, samplerate=samplerate)
-
-Then we can run a second pipe with the previously decoded frames and pass the frames to the same analyzer:
-
->>> pitch_on_array = timeside.analyzer.AubioPitch()
->>> (array_decoder | pitch_on_array).run()
-
-To assert that the frames passed to the two analyzers are the same, we check that the results of these analyzers are equivalent:
-
->>> np.allclose(pitch_on_file.results['aubio_pitch.pitch'].data,
-... pitch_on_array.results['aubio_pitch.pitch'].data)
-True
-
--- /dev/null
+.. This file is part of TimeSide
+ @author: Thomas Fillon
+
+===============================================
+ Running a pipe with previously decoded frames
+===============================================
+
+Example of use of the `stack` option in :func:`timeside.core.ProcessPipe.run` to run a pipe with previously decoded frames stacked in memory on a second pass.
+
+>>> import timeside
+>>> import numpy as np
+>>> audio_file = 'http://github.com/yomguy/timeside-samples/raw/master/samples/sweep.mp3'
+>>> decoder = timeside.decoder.FileDecoder(audio_file)
+
+Setup an arbitrary analyzer to check that the decoding processes from file and from stack are equivalent:
+
+>>> pitch_on_file = timeside.analyzer.AubioPitch()
+>>> myPipe = (decoder | pitch_on_file)
+>>> print myPipe.processors #doctest: +ELLIPSIS
+[<timeside.decoder.core.FileDecoder object at 0x...>, <timeside.analyzer.aubio_pitch.AubioPitch object at 0x...>]
+
+If the pipe is run with the default argument `stack=False`, the other processors of the pipe are released from the pipe after the run and only the :class:`FileDecoder <timeside.decoder.core.FileDecoder>` is kept in the pipe:
+
+>>> myPipe.run()
+>>> print myPipe.processors #doctest: +ELLIPSIS
+[<timeside.decoder.core.FileDecoder object at 0x...>]
+
+
+If the pipe is run with the argument `stack=True`, the processed frames are stored in the pipe attribute `frames_stack`.
+The other processors of the pipe are also released from the pipe after the run, but the :class:`FileDecoder <timeside.decoder.core.FileDecoder>` is replaced by an :class:`ArrayDecoder <timeside.decoder.core.ArrayDecoder>`:
+
+>>> myPipe = (decoder | pitch_on_file)
+>>> myPipe.run(stack=True)
+>>> print myPipe.processors #doctest: +ELLIPSIS
+[<timeside.decoder.core.ArrayDecoder object at 0x...>]
+
+The stacked frames can then be accessed through the `frames_stack` attribute:
+
+>>> myPipe.frames_stack #doctest: +ELLIPSIS
+array([[...]], dtype=float32)
+
+
+Then we can run a second pipe with the previously decoded frames and pass the frames to the same analyzer.
+
+Define a second analyzer equivalent to the previous one:
+
+>>> pitch_on_stack = timeside.analyzer.AubioPitch()
+
+Add it to the pipe:
+
+>>> myPipe |= pitch_on_stack
+>>> print myPipe.processors #doctest: +ELLIPSIS
+[<timeside.decoder.core.ArrayDecoder object at 0x...>, <timeside.analyzer.aubio_pitch.AubioPitch object at 0x...>]
+
+
+Run the pipe:
+
+>>> myPipe.run()
+
+To assert that the frames passed to the two analyzers are the same, we check that the results of these analyzers are equivalent:
+
+>>> np.allclose(pitch_on_file.results['aubio_pitch.pitch'].data,
+... pitch_on_stack.results['aubio_pitch.pitch'].data)
+True
+
Quick start <quick_start>
Usage of AnalyzerResult <AnalyzerResult>
- Running a pipe with previously decoded frames <ArrayDecoder>
+ Running a pipe with previously decoded frames <frames_stack>
self.result.as_dict().keys())
if __name__ == '__main__':
- unittest.main(testRunner=TestRunner())
\ No newline at end of file
+ unittest.main(testRunner=TestRunner())
melenergy.parameters = dict(n_filters=self.n_filters,
n_coeffs=self.n_coeffs)
melenergy.data_object.value = self.melenergy_results
- self.pipe.results.add(melenergy)
+ self.process_pipe.results.add(melenergy)
mfcc.parameters = dict(n_filters=self.n_filters,
n_coeffs=self.n_coeffs)
mfcc.data_object.value = self.mfcc_results
- self.pipe.results.add(mfcc)
+ self.process_pipe.results.add(mfcc)
pitch.id_metadata.name += ' ' + "pitch"
pitch.id_metadata.unit = "Hz"
pitch.data_object.value = self.pitches
- self.pipe.results.add(pitch)
+ self.process_pipe.results.add(pitch)
pitch_confidence = self.new_result(data_mode='value', time_mode='framewise')
pitch_confidence.id_metadata.id += '.' + "pitch_confidence"
pitch_confidence.id_metadata.name += ' ' + "pitch confidence"
pitch_confidence.id_metadata.unit = None
pitch_confidence.data_object.value = self.pitch_confidences
- self.pipe.results.add(pitch_confidence)
+ self.process_pipe.results.add(pitch_confidence)
res_specdesc.id_metadata.name = ' ' + method
res_specdesc.data_object.value = self.specdesc_results[method]
- self.pipe.results.add(res_specdesc)
+ self.process_pipe.results.add(res_specdesc)
onsets.data_object.label = numpy.ones(len(self.onsets))
onsets.label_metadata.label = {1: 'Onset'}
- self.pipe.results.add(onsets)
+ self.process_pipe.results.add(onsets)
#---------------------------------
# Onset Rate: Segment (time, duration, value)
onsetrate.data_object.value = []
onsetrate.data_object.time = []
- self.pipe.results.add(onsetrate)
+ self.process_pipe.results.add(onsetrate)
#---------------------------------
# Beats: Event (time, "Beat")
beats.data_object.label = numpy.ones(len(self.beats))
beats.label_metadata.label = {1: 'Beat'}
- self.pipe.results.add(beats)
+ self.process_pipe.results.add(beats)
#---------------------------------
# Beat confidences: Event (time, value)
beat_confidences.data_object.time = self.beats
beat_confidences.data_object.value = self.beat_confidences
- self.pipe.results.add(beat_confidences)
+ self.process_pipe.results.add(beat_confidences)
#---------------------------------
# BPM: Segment (time, duration, value)
else:
bpm.data_object.value = []
- self.pipe.results.add(bpm)
+ self.process_pipe.results.add(bpm)
def results(self):
return AnalyzerResultContainer(
- [self.pipe.results[key] for key in self.pipe.results.keys()
+ [self.process_pipe.results[key] for key in self.process_pipe.results.keys()
if key.split('.')[0] == self.id()])
@staticmethod
dc_result = self.new_result(data_mode='value', time_mode='global')
dc_result.data_object.value = numpy.round(
numpy.mean(100 * self.values), 3)
- self.pipe.results.add(dc_result)
+ self.process_pipe.results.add(dc_result)
max_level.id_metadata.name += ' ' + "Max"
max_level.data_object.value = np.round(20*np.log10(self.max_value), 3)
- self.pipe.results.add(max_level)
+ self.process_pipe.results.add(max_level)
# RMS level
rms_level = self.new_result(data_mode='value', time_mode='global')
rms_level.data_object.value = np.round(20*np.log10(
np.sqrt(np.mean(self.mean_values))), 3)
- self.pipe.results.add(rms_level)
+ self.process_pipe.results.add(rms_level)
def post_process(self):
#spectrogram = self.parents()[0]['spectrogram_analyzer'].data
- spectrogram = self.pipe.results['spectrogram_analyzer'].data
+ spectrogram = self.process_pipe.results['spectrogram_analyzer'].data
#spectrogram = self.pipe._results[self.parents()[0].id]
# Low-pass filtering of the spectrogram amplitude along the time axis
odf = self.new_result(data_mode='value', time_mode='framewise')
#odf.parameters = {'FFT_SIZE': self.FFT_SIZE}
odf.data_object.value = odf_diff
- self.pipe.results.add(odf)
+ self.process_pipe.results.add(odf)
spectrogram = self.new_result(data_mode='value', time_mode='framewise')
spectrogram.parameters = {'FFT_SIZE': self.FFT_SIZE}
spectrogram.data_object.value = self.values
- self.pipe.results.add(spectrogram)
+ self.process_pipe.results.add(spectrogram)
plugin_res.id_metadata.name += ' ' + \
' '.join(plugin_line[1:])
- self.pipe.results.add(plugin_res)
+ self.process_pipe.results.add(plugin_res)
@staticmethod
def vamp_plugin(plugin, wavfile):
def post_process(self):
waveform = self.new_result(data_mode='value', time_mode='framewise')
waveform.data_object.value = np.vstack(self.values)
- self.pipe.results.add(waveform)
+ self.process_pipe.results.add(waveform)
result.data_object.value = self.yaafe_engine.readOutput(featName)
# Store results in Container
if len(result.data_object.value):
- self.pipe.results.add(result)
+ self.process_pipe.results.add(result)
from timeside.component import *
from timeside.api import IProcessor
from timeside.exceptions import Error, ApiError
+
+
import re
import time
import numpy
from timeside.analyzer.core import AnalyzerResultContainer
self.results = AnalyzerResultContainer()
- for proc in self.processors:
- proc.pipe = self
-
def __or__(self, other):
return ProcessPipe(self, other)
def __ior__(self, other):
if isinstance(other, Processor):
- parents = other.parents
- for parent in parents:
+ for parent in other.parents:
self |= parent
self.processors.append(other)
+ other.process_pipe = self
elif isinstance(other, ProcessPipe):
self.processors.extend(other.processors)
else:
pipe += ' | '
return pipe
- def run(self, channels = None, samplerate = None, blocksize = None):
+ def run(self, channels=None, samplerate=None, blocksize=None, stack=None):
"""Setup/reset all processors in cascade and stream audio data along
the pipe. Also returns the pipe itself."""
source = self.processors[0]
items = self.processors[1:]
- source.setup(channels = channels, samplerate = samplerate, blocksize = blocksize)
+ source.setup(channels=channels, samplerate=samplerate,
+ blocksize=blocksize)
+
+ if stack is None:
+ self.stack = False
+ else:
+ self.stack = stack
+
+ if self.stack:
+ self.frames_stack = []
last = source
eod = False
while not eod:
frames, eod = source.process()
+ if self.stack:
+ self.frames_stack.append(frames)
for item in items:
frames, eod = item.process(frames, eod)
+ # Post-processing
for item in items:
item.post_process()
+ # Release processors
+ if self.stack:
+ if not isinstance(self.frames_stack, numpy.ndarray):
+ self.frames_stack = numpy.vstack(self.frames_stack)
+ from timeside.decoder.core import ArrayDecoder
+ new_source = ArrayDecoder(samples=self.frames_stack,
+ samplerate=source.samplerate())
+ new_source.setup(channels=source.channels(),
+ samplerate=source.samplerate(),
+ blocksize=source.blocksize())
+ self.processors[0] = new_source
+
for item in items:
item.release()
+ self.processors.remove(item)
def id():
return "gst_dec"
- def __init__(self, uri, start = 0, duration = None):
+ def __init__(self, uri, start=0, duration=None):
"""
Construct a new FileDecoder
from tests import test_decoding, test_array_decoding
run_test_module([test_decoding, test_array_decoding])
-