>>> for key, value in res.items():
...     print '%s : %s' % (key, value)
...
- id_metadata : {'description': '', 'author': '', 'uuid': '', 'version': '', 'date': '', 'id': '', 'unit': '', 'name': ''}
+ id_metadata : {'description': '', 'author': '', 'res_uuid': '', 'version': '', 'date': '', 'proc_uuid': '', 'id': '', 'unit': '', 'name': ''}
- data_object : {'value': array([], dtype=float64)}
+ data_object : {'y_value': array([], dtype=float64), 'value': array([], dtype=float64), 'frame_metadata': {'blocksize': None, 'samplerate': None, 'stepsize': None}}
audio_metadata : {'sha1': '', 'is_segment': None, 'uri': '', 'channels': None, 'start': 0, 'channelsManagement': '', 'duration': None}
- frame_metadata : {'blocksize': None, 'samplerate': None, 'stepsize': None}
parameters : {}
>>> encoders = get_processor('gst_mp3_enc')('sweep.mp3') | get_processor('gst_flac_enc')('sweep.flac')
>>> (decoder | levels | encoders).run()
>>> print levels.results
- {'...': GlobalValueResult(id_metadata=IdMetadata(id='level.max', name='Level Max', unit='dBFS', description='', date='...', version='...', author='TimeSide', proc_uuid='...', res_uuid='...'), data_object=DataObject(value=array([-6.021])), audio_metadata=AudioMetadata(uri='file://...sweep.wav', start=0.0, duration=8.0, is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={}), '...': GlobalValueResult(id_metadata=IdMetadata(id='level.rms', name='Level RMS', unit='dBFS', description='', date='...', version='...', author='TimeSide', proc_uuid='...', res_uuid='...'), data_object=DataObject(value=array([-9.856])), audio_metadata=AudioMetadata(uri='file://...sweep.wav', start=0.0, duration=8.0, is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={})}
- {'level.max': AnalyzerResult(id_metadata=IdMetadata(id='level.max', name='Level Max', unit='dBFS', description='', date='...', version='...', author='TimeSide', uuid='...'), data_object=GlobalValueObject(value=array([-6.021]), y_value=array([], dtype=float64)), audio_metadata=AudioMetadata(uri='file:///.../sweep.wav', start=0.0, duration=8.0, is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={}), 'level.rms': AnalyzerResult(id_metadata=IdMetadata(id='level.rms', name='Level RMS', unit='dBFS', description='', date='...', version='...', author='TimeSide', uuid='...'), data_object=GlobalValueObject(value=array([-9.856]), y_value=array([], dtype=float64)), audio_metadata=AudioMetadata(uri='file:///.../sweep.wav', start=0.0, duration=8.0, is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={})}
++ {'...': GlobalValueResult(id_metadata=IdMetadata(id='level.max', name='Level Max', unit='dBFS', description='', date='...', version='...', author='TimeSide', proc_uuid='...', res_uuid='...'), data_object=GlobalValueObject(value=array([-6.021]), y_value=array([], dtype=float64)), audio_metadata=AudioMetadata(uri='file://...sweep.wav', start=0.0, duration=8.0, is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={}), '...': GlobalValueResult(id_metadata=IdMetadata(id='level.rms', name='Level RMS', unit='dBFS', description='', date='...', version='...', author='TimeSide', proc_uuid='...', res_uuid='...'), data_object=GlobalValueObject(value=array([-9.856]), y_value=array([], dtype=float64)), audio_metadata=AudioMetadata(uri='file://...sweep.wav', start=0.0, duration=8.0, is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={})}
++
>>> a = Analyzer()
>>> (d|a).run()
>>> a.new_result() #doctest: +ELLIPSIS
- FrameValueResult(id_metadata=IdMetadata(id='analyzer', name='Generic analyzer', unit='', description='', date='...', version='...', author='TimeSide', proc_uuid='...', res_uuid='...'), data_object=DataObject(value=array([], dtype=float64)), audio_metadata=AudioMetadata(uri='...', start=0.0, duration=8.0..., is_segment=False, sha1='...', channels=2, channelsManagement=''), frame_metadata=FrameMetadata(samplerate=44100, blocksize=8192, stepsize=8192), parameters={})
- AnalyzerResult(id_metadata=IdMetadata(id='analyzer', name='Generic analyzer', unit='', description='', date='...', version='...', author='TimeSide', uuid='...'), data_object=FrameValueObject(value=array([], dtype=float64), y_value=array([], dtype=float64), frame_metadata=FrameMetadata(samplerate=44100, blocksize=8192, stepsize=8192)), audio_metadata=AudioMetadata(uri='...', start=0.0, duration=8.0..., is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={})
++ FrameValueResult(id_metadata=IdMetadata(id='analyzer', name='Generic analyzer', unit='', description='', date='...', version='...', author='TimeSide', proc_uuid='...', res_uuid='...'), data_object=FrameValueObject(value=array([], dtype=float64), y_value=array([], dtype=float64), frame_metadata=FrameMetadata(samplerate=44100, blocksize=8192, stepsize=8192)), audio_metadata=AudioMetadata(uri='...', start=0.0, duration=8.0..., is_segment=False, sha1='...', channels=2, channelsManagement=''), parameters={})
>>> resContainer = timeside.analyzer.core.AnalyzerResultContainer()
'''
    @interfacedoc
    def post_process(self):
-
-        parent_result = self.process_pipe.results[self._result_id]
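+        # fetch the parent analyzer result through the results container helper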
+        pipe_result = self.process_pipe.results
+        parent_result = pipe_result.get_result_by_id(self._result_id)
-        self.image = parent_result._render_PIL((self.image_width,
-                                                self.image_height), self.dpi)
+        fg_image = parent_result._render_PIL((self.image_width,
+                                              self.image_height), self.dpi)
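+        # optionally blend a grayscale background rendering behind the analyzer image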
+        if self._background:
+            bg_result = pipe_result.get_result_by_id(self._bg_id)
+            bg_image = bg_result._render_PIL((self.image_width,
+                                              self.image_height), self.dpi)
+            # convert image to grayscale
+            bg_image = bg_image.convert('LA').convert('RGBA')
+
+            # Merge background and foreground images
+            from PIL.Image import blend
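+            # blend(fg, bg, 0.25) keeps the foreground dominant: 75% analyzer plot, 25% background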
+            fg_image = blend(fg_image, bg_image, 0.25)
+
+        self.image = fg_image
    @classmethod
-    def create(cls, analyzer, result_id, grapher_id, grapher_name):
+    def create(cls, analyzer, analyzer_parameters={}, result_id=None,
+               grapher_id=None, grapher_name=None,
+               background=None):
        class NewGrapher(cls):