From 532bccaaa0d05d562ec91935d3408819bc4fb06c Mon Sep 17 00:00:00 2001
From: Guillaume Pellerin
Date: Mon, 26 Jan 2015 00:38:22 +0100
Subject: [PATCH] fix dive in doctests

---
 README.rst                           |  18 +--
 doc/make_readme.sh                   |   3 +-
 doc/source/intro.rst                 |  10 +-
 timeside/grapher/render_analyzers.py | 228 ---------------------------
 4 files changed, 12 insertions(+), 247 deletions(-)
 delete mode 100644 timeside/grapher/render_analyzers.py

diff --git a/README.rst b/README.rst
index 95c9a34..f0bface 100644
--- a/README.rst
+++ b/README.rst
@@ -58,23 +58,19 @@ The streaming architecture of TimeSide relies on 2 main parts: a processing engi
 Dive in
 ========
 
-To list all available plugins:
+To list all available plugins::
 
-.. testcleanup::
 
     import os
    os.remove('sweep.ogg')
    os.remove('waveform.png')
 
-.. doctest::
 
     >>> import timeside
-    >>> timeside.core.list_processors()  # doctest: +SKIP
 
-Define some processors:
+Define some processors::
 
-.. doctest::
 
     >>> from timeside.core import get_processor
     >>> from timeside.core.tools.test_samples import samples
 
@@ -84,23 +80,19 @@ Define some processors:
 
     >>> analyzer = get_processor('level')()
     >>> encoder = get_processor('vorbis_encoder')('sweep.ogg')
 
-Then run the *magic* pipeline:
+Then run the *magic* pipeline::
 
-.. doctest::
 
     >>> (decoder | grapher | analyzer | encoder).run()
 
-Render the grapher results:
+Render the grapher results::
 
-.. doctest::
 
     >>> grapher.render(output='waveform.png')
 
-Show the analyzer results:
+Show the analyzer results::
 
-.. doctest::
 
-    >>> print 'Level:', analyzer.results  # doctest: +ELLIPSIS
     Level: {'level.max': AnalyzerResult(...)}
 
diff --git a/doc/make_readme.sh b/doc/make_readme.sh
index 6e2afa6..7932716 100755
--- a/doc/make_readme.sh
+++ b/doc/make_readme.sh
@@ -1,4 +1,5 @@
 #/bin/sh
 
-cat source/intro.rst source/news.rst source/processors.rst source/doc.rst source/install.rst source/ui.rst source/dev.rst source/related.rst source/copyright.rst > ../README.rst
+sed '/doctest/d' source/intro.rst | sed '/testcleanup/d' > ../README.rst
+cat source/news.rst source/processors.rst source/doc.rst source/install.rst source/ui.rst source/dev.rst source/related.rst source/copyright.rst >> ../README.rst
 
diff --git a/doc/source/intro.rst b/doc/source/intro.rst
index 1ac68ea..3a82d78 100644
--- a/doc/source/intro.rst
+++ b/doc/source/intro.rst
@@ -58,7 +58,7 @@ The streaming architecture of TimeSide relies on 2 main parts: a processing engi
 Dive in
 ========
 
-To list all available plugins:
+To list all available plugins::
 
 .. testcleanup::
 
@@ -72,7 +72,7 @@ To list all available plugins:
 
     >>> timeside.core.list_processors()  # doctest: +SKIP
 
-Define some processors:
+Define some processors::
 
 .. doctest::
 
@@ -84,19 +84,19 @@ Define some processors:
 
     >>> analyzer = get_processor('level')()
     >>> encoder = get_processor('vorbis_encoder')('sweep.ogg')
 
-Then run the *magic* pipeline:
+Then run the *magic* pipeline::
 
 .. doctest::
 
     >>> (decoder | grapher | analyzer | encoder).run()
 
-Render the grapher results:
+Render the grapher results::
 
 .. doctest::
 
     >>> grapher.render(output='waveform.png')
 
-Show the analyzer results:
+Show the analyzer results::
 
 .. doctest::
diff --git a/timeside/grapher/render_analyzers.py b/timeside/grapher/render_analyzers.py
deleted file mode 100644
index eec5eea..0000000
--- a/timeside/grapher/render_analyzers.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2007-2014 Guillaume Pellerin
-# Copyright (c) 2013-2014 Thomas Fillon
-
-# This file is part of TimeSide.
-
-# TimeSide is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-
-# TimeSide is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import division
-
-from ..core import implements, interfacedoc, abstract, get_processor
-from ..api import IGrapher
-from .core import Grapher
-from ..exceptions import PIDError
-
-
-class DisplayAnalyzer(Grapher):
-
-    """
-    image from analyzer result
-    This is an Abstract base class
-    """
-    dpi = 72  # Web default value for Telemeta
-
-    implements(IGrapher)
-    abstract()
-
-    @interfacedoc
-    def __init__(self, width=1024, height=256, bg_color=(0, 0, 0),
-                 color_scheme='default'):
-        super(DisplayAnalyzer, self).__init__(width, height, bg_color,
-                                              color_scheme)
-
-        self._result_id = None
-        self._id = NotImplemented
-        self._name = NotImplemented
-
-    @interfacedoc
-    def process(self, frames, eod=False):
-        return frames, eod
-
-    @interfacedoc
-    def post_process(self):
-        pipe_result = self.process_pipe.results
-        analyzer_uuid = self.parents['analyzer'].uuid()
-        analyzer_result = pipe_result[analyzer_uuid][self._result_id]
-
-        fg_image = analyzer_result._render_PIL((self.image_width,
-                                                self.image_height), self.dpi)
-        if self._background:
-            bg_uuid = self.parents['bg_analyzer'].uuid()
-            bg_result = pipe_result[bg_uuid][self._bg_id]
-            bg_image = bg_result._render_PIL((self.image_width,
-                                              self.image_height), self.dpi)
-            # convert image to grayscale
-            bg_image = bg_image.convert('LA').convert('RGBA')
-
-            # Merge background and foreground images
-            from PIL.Image import blend
-            fg_image = blend(fg_image, bg_image, 0.15)
-
-        self.image = fg_image
-
-    @classmethod
-    def create(cls, analyzer, analyzer_parameters={}, result_id=None,
-               grapher_id=None, grapher_name=None,
-               background=None, staging=False):
-
-        class NewGrapher(cls):
-
-            _id = grapher_id
-            _staging = staging
-
-            implements(IGrapher)
-
-            @interfacedoc
-            def __init__(self, width=1024, height=256, bg_color=(0, 0, 0),
-                         color_scheme='default'):
-                super(NewGrapher, self).__init__(width, height, bg_color,
-                                                 color_scheme)
-
-                # Add a parent waveform analyzer
-                if background == 'waveform':
-                    self._background = True
-                    bg_analyzer = get_processor('waveform_analyzer')()
-                    self._bg_id = bg_analyzer.id()
-                    self.parents['bg_analyzer'] = bg_analyzer
-                elif background == 'spectrogram':
-                    self._background = True
-                    bg_analyzer = get_processor('spectrogram_analyzer')()
-                    self._bg_id = bg_analyzer.id()
-                    self.parents['bg_analyzer'] = bg_analyzer
-
-                else:
-                    self._background = None
-
-                parent_analyzer = analyzer(**analyzer_parameters)
-                self.parents['analyzer'] = parent_analyzer
-                self._result_id = result_id
-
-            @staticmethod
-            @interfacedoc
-            def id():
-                return grapher_id
-
-            @staticmethod
-            @interfacedoc
-            def name():
-                return grapher_name
-
-            __doc__ = """Image representing """ + grapher_name
-
-        NewGrapher.__name__ = 'Display' + '.' + result_id
-
-        return NewGrapher
-
-#-------------------------------------------------
-# From here define new Graphers based on Analyzers
-#-------------------------------------------------
-
-# Aubio Pitch
-try:  # because of the dependencies on the Aubio librairy
-    aubiopitch = get_processor('aubio_pitch')
-    DisplayAubioPitch = DisplayAnalyzer.create(
-        analyzer=aubiopitch,
-        result_id='aubio_pitch.pitch',
-        grapher_id='grapher_aubio_pitch',
-        grapher_name='Pitch',
-        background='spectrogram')
-except PIDError:
-    pass
-
-# Onset Detection Function
-odf = get_processor('onset_detection_function')
-DisplayOnsetDetectionFunction = DisplayAnalyzer.create(
-    analyzer=odf,
-    result_id='onset_detection_function',
-    grapher_id='grapher_onset_detection_function',
-    grapher_name='Onset detection')
-
-# Waveform
-wav = get_processor('waveform_analyzer')
-DisplayWaveform = DisplayAnalyzer.create(analyzer=wav,
-                                         result_id='waveform_analyzer',
-                                         grapher_id='grapher_waveform',
-                                         grapher_name='Waveform from Analyzer',
-                                         staging=True)
-
-# IRIT 4Hz
-irit4hz = get_processor('irit_speech_4hz')
-Display4hzSpeechSegmentation = DisplayAnalyzer.create(
-    analyzer=irit4hz,
-    result_id='irit_speech_4hz.segments',
-    grapher_id='grapher_irit_speech_4hz_segments',
-    grapher_name='Speech segmentation',
-    background='waveform',
-    staging=True)
-
-
-# IRIT 4Hz with median filter
-irit4hz = get_processor('irit_speech_4hz')
-Display4hzSpeechSegmentation = DisplayAnalyzer.create(
-    analyzer=irit4hz,
-    result_id='irit_speech_4hz.segments_median',
-    grapher_id='grapher_irit_speech_4hz_segments_median',
-    grapher_name='Speech segmentation (median)',
-    background='waveform',
-    staging=True)
-
-# IRIT Monopoly
-try:  # because of the dependencies on Aubio Pitch
-    iritmonopoly = get_processor('irit_monopoly')
-    DisplayMonopoly = DisplayAnalyzer.create(
-        analyzer=iritmonopoly,
-        result_id='irit_monopoly.segments',
-        grapher_id='grapher_monopoly_segments',
-        grapher_name='Mono/Poly segmentation',
-        background='waveform',
-        staging=True)
-except PIDError:
-    pass
-
-# Limsi SAD : 2 models
-try:
-    limsi_sad = get_processor('limsi_sad')
-
-    DisplayLIMSI_SAD_etape = DisplayAnalyzer.create(
-        analyzer=limsi_sad,
-        analyzer_parameters={'sad_model': 'etape'},
-        result_id='limsi_sad.sad_lhh_diff',
-        grapher_id='grapher_limsi_sad_etape',
-        grapher_name='Speech activity (ETAPE)',
-        background='waveform',
-        staging=True)
-
-    DisplayLIMSI_SAD_maya = DisplayAnalyzer.create(
-        analyzer=limsi_sad,
-        analyzer_parameters={'sad_model': 'maya'},
-        result_id='limsi_sad.sad_lhh_diff',
-        grapher_id='grapher_limsi_sad_maya',
-        grapher_name='Speech activity (Mayan)',
-        background='waveform',
-        staging=True)
-
-except PIDError:
-    pass
-
-# IRIT Start Seg
-irit_startseg = get_processor('irit_startseg')
-DisplayIRIT_Start = DisplayAnalyzer.create(
-    analyzer=irit_startseg,
-    result_id='irit_startseg.segments',
-    grapher_id='grapher_irit_startseg',
-    grapher_name='Analogous start point',
-    background='waveform',
-    staging=True)
-- 
2.39.5
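
The sed pipeline introduced in doc/make_readme.sh above works because sed's `d` command drops every line whose text matches the pattern, so the `.. doctest::` and `.. testcleanup::` directives never reach the generated README.rst, while the sentences changed to end in `::` turn the indented examples that remain into plain reST literal blocks. Below is a minimal sketch of the same filtering on a scratch fragment; it assumes a POSIX shell and sed, and the file names intro-fragment.rst and README.fragment are illustrative, not the real doc/source files.

#!/bin/sh
# Write a small Sphinx-flavoured fragment to filter (hypothetical content).
cat > intro-fragment.rst <<'EOF'
To list all available plugins::

.. doctest::

    >>> import timeside
    >>> timeside.core.list_processors()  # doctest: +SKIP
EOF

# Drop every line containing 'doctest' or 'testcleanup', which is what the
# two piped sed calls in make_readme.sh do. Note that the pattern also
# matches option comments such as '# doctest: +SKIP', so those example
# lines disappear from the generated README as well.
sed '/doctest/d' intro-fragment.rst | sed '/testcleanup/d' > README.fragment
cat README.fragment

In this sketch only the `To list all available plugins::` sentence, the blank lines, and the bare `>>> import timeside` line survive the filter, which is why the patch pairs the sed filtering with the `::` literal-block markers instead of relying on the stripped `.. doctest::` directives.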