def duration(self):
    """Return the per-item durations of this result.

    Events are instantaneous, so every duration is zero; the array length
    matches len(self).
    """
    item_count = len(self)
    return numpy.zeros(item_count)
+ def _render_plot(self, ax):
+ ax.stem(self.time, self.data)
+
class SegmentObject(EventObject):
    """Time-mode mixin for results whose items presumably cover a time span
    (start time plus a duration) rather than instantaneous events — confirm
    against the base EventObject's handling of _time_mode."""
    # Discriminator read by the time-mode machinery in the base classes.
    _time_mode = 'segment'
class EventValueResult(ValueObject, EventObject, AnalyzerResult):
    """Analyzer result holding numeric values at event times.

    Intentionally empty: value handling and event rendering are
    inherited from the ValueObject and EventObject bases.
    """
    pass
class EventLabelResult(LabelObject, EventObject, AnalyzerResult):
    """Analyzer result holding labels at event times.

    Intentionally empty: label handling and event rendering are
    inherited from the LabelObject and EventObject bases.
    """
    pass
class SegmentValueResult(ValueObject, SegmentObject, AnalyzerResult):
    """Analyzer result holding one numeric value per time segment."""

    def _render_plot(self, ax):
        """Draw a red vertical line at each segment start time, with the
        line's vertical extent scaled by the segment's value.

        Bug fix: the pairs must be produced with zip(); iterating the bare
        tuple ``(self.time, self.data)`` yields the two arrays themselves
        and tries to unpack each whole array into (time, value).
        Also removed a dead ``itertools.cycle`` colour palette that was
        never read.
        """
        for time, value in zip(self.time, self.data):
            ax.axvline(time, ymin=0, ymax=value, color='r')
        # TODO : check value shape !!!
class SegmentLabelResult(LabelObject, SegmentObject, AnalyzerResult):
    """Analyzer result holding one label per time segment."""

    def _render_plot(self, ax):
        """Draw each segment as a translucent coloured horizontal span,
        assigning one colour per distinct label.

        Fix: use the ``next()`` builtin instead of the Python-2-only
        ``colors.next()`` iterator method, so the code also runs on
        Python 3 (identical behaviour on Python 2.6+).
        """
        import itertools
        # Cycle through a fixed palette; labels beyond 7 reuse colours.
        colors = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])
        ax_color = {}
        for key in self.label_metadata.label.keys():
            ax_color[key] = next(colors)
        for time, duration, label in zip(self.time, self.duration, self.data):
            ax.axvspan(time, time + duration, color=ax_color[label], alpha=0.3)
class AnalyzerResultContainer(dict):
>>> a.new_result() #doctest: +ELLIPSIS
FrameValueResult(id_metadata=IdMetadata(id='analyzer', name='Generic analyzer', unit='', description='', date='...', version='...', author='TimeSide', uuid='...'), data_object=DataObject(value=array([], dtype=float64)), audio_metadata=AudioMetadata(uri='...', start=0.0, duration=8.0..., is_segment=False, channels=None, channelsManagement=''), frame_metadata=FrameMetadata(samplerate=44100, blocksize=8192, stepsize=8192), parameters={})
>>> resContainer = timeside.analyzer.core.AnalyzerResultContainer()
-
'''
def name():
    # Closure over grapher_name — presumably an argument of the enclosing
    # factory (not visible in this chunk); exposes the generated grapher's
    # human-readable display name. TODO confirm against the factory signature.
    return grapher_name
+ __doc__ = """Builds a PIL image representing """ + grapher_name
+
NewGrapher.__name__ = 'Display'+result_id
return NewGrapher
result_id='waveform_analyzer',
grapher_id='grapher_waveform',
grapher_name='Waveform from Analyzer')
# Register a grapher that displays the IRIT 4Hz speech segmentation result,
# built from an IRITSpeech4Hz analyzer instance via the DisplayAnalyzer factory.
irit4hz = analyzer.IRITSpeech4Hz()
Display4hzSpeechSegmentation = DisplayAnalyzer.create(analyzer=irit4hz,
                                    result_id='irit_speech_4hz.segments',
                                    grapher_id='grapher_irit_speech_4hz_segments',
                                    grapher_name='Irit 4Hz Speech Segmentation')