From 72b7a182d3a30f015f9990c864a52280b560a126 Mon Sep 17 00:00:00 2001 From: yomguy Date: Thu, 2 Sep 2010 20:42:49 +0000 Subject: [PATCH] fix joy grapher, *CHANGE API* so that the output for the grapher is given to the save() method, see api.py --- setup.py | 2 +- timeside/__init__.py | 5 +- timeside/api.py | 4 +- timeside/component.py | 22 +++--- timeside/grapher/__init__.py | 2 +- timeside/grapher/core.py | 117 +++-------------------------- timeside/grapher/spectrogram.py | 13 ++-- timeside/grapher/waveform.py | 11 ++- timeside/grapher/waveform_joy.py | 123 +++++++++++++++++++++++++++++-- timeside/tools/waveform_batch.py | 8 +- 10 files changed, 159 insertions(+), 148 deletions(-) diff --git a/setup.py b/setup.py index 406403a..280ce18 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ '''The setup and build script for the python-twitter library.''' __author__ = 'yomguy@parisson.com' -__version__ = '0.1-beta' +__version__ = '0.2' # The base package metadata to be used by both distutils and setuptools diff --git a/timeside/__init__.py b/timeside/__init__.py index 59bd36d..37fe08f 100644 --- a/timeside/__init__.py +++ b/timeside/__init__.py @@ -9,7 +9,8 @@ import timeside.decoder import timeside.encoder import timeside.grapher import timeside.analyzer -import timeside.tests -from timeside.core import * +#import timeside.tests + + diff --git a/timeside/api.py b/timeside/api.py index 7ceaecf..edd68b5 100644 --- a/timeside/api.py +++ b/timeside/api.py @@ -164,9 +164,9 @@ class IGrapher(IProcessor): """Set the colors used for image generation. background is a RGB tuple, and scheme a a predefined color theme name""" - def render(self): + def render(self, output=None): """Return a PIL Image object visually representing all of the data passed - by repeatedly calling process()""" + by repeatedly calling process() and write the image to the output if specified""" class IAnalyzer(IProcessor): """Media item analyzer driver interface. This interface is abstract, it doesn't diff --git a/timeside/component.py b/timeside/component.py index ff20670..25ddec6 100644 --- a/timeside/component.py +++ b/timeside/component.py @@ -18,7 +18,7 @@ # along with TimeSide. If not, see . -# This file defines a generic object interface mechanism and +# This file defines a generic object interface mechanism and # a way to determine which components implements a given interface. # # For example, the following defines the Music class as implementing the @@ -30,19 +30,19 @@ # class Music(Component): # implements(Listenable) # -# Several class can implements a such interface, and it is possible to +# Several class can implements a such interface, and it is possible to # discover which class implements it with implementations(): # # list_of_classes = implementations(Listenable) # -# This mechanism support inheritance of interfaces: a class implementing a given +# This mechanism support inheritance of interfaces: a class implementing a given # interface is also considered to implement all the ascendants of this interface. # -# However, inheritance is not supported for components. The descendants of a class -# implementing a given interface are not automatically considered to implement this -# interface too. +# However, inheritance is not supported for components. The descendants of a class +# implementing a given interface are not automatically considered to implement this +# interface too. 
-__all__ = ['Component', 'MetaComponent', 'implements', 'abstract', +__all__ = ['Component', 'MetaComponent', 'implements', 'abstract', 'interfacedoc', 'Interface', 'implementations', 'ComponentError'] class Interface(object): @@ -58,8 +58,8 @@ def abstract(): MetaComponent.abstract = True def implementations(interface, recurse=True, abstract=False): - """Returns the components implementing interface, and if recurse, any of - the descendants of interface. If abstract is True, also return the + """Returns the components implementing interface, and if recurse, any of + the descendants of interface. If abstract is True, also return the abstract implementations.""" result = [] find_implementations(interface, recurse, abstract, result) @@ -89,7 +89,7 @@ class MetaComponent(type): if MetaComponent.implements: for i in MetaComponent.implements: MetaComponent.implementations.append({ - 'interface': i, + 'interface': i, 'class': new_class, 'abstract': MetaComponent.abstract}) @@ -108,7 +108,7 @@ class MetaComponent(type): raise ComponentError("@interfacedoc: %s.%s: no such member in implemented interfaces: %s" % (new_class.__name__, name, str(MetaComponent.implements))) member.__doc__ = if_member.__doc__ - + MetaComponent.implements = [] MetaComponent.abstract = False diff --git a/timeside/grapher/__init__.py b/timeside/grapher/__init__.py index d8ed293..c0f9246 100644 --- a/timeside/grapher/__init__.py +++ b/timeside/grapher/__init__.py @@ -2,5 +2,5 @@ from core import * from waveform import * -from waveform_joy import * from spectrogram import * +#from waveform_joy import * diff --git a/timeside/grapher/core.py b/timeside/grapher/core.py index 51a4552..f4aad49 100644 --- a/timeside/grapher/core.py +++ b/timeside/grapher/core.py @@ -26,7 +26,6 @@ import optparse, math, sys import ImageFilter, ImageChops, Image, ImageDraw, ImageColor import numpy -from scipy.signal import cspline1d, cspline1d_eval from timeside.core import FixedSizeInputAdapter @@ -132,13 +131,12 @@ class WaveformImage(object): Adds pixels iteratively thanks to the adapter providing fixed size frame buffers. Peaks are colored relative to the spectral centroids of each frame packet. """ - def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=None): + def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme): self.image_width = image_width self.image_height = image_height self.nframes = nframes self.samplerate = samplerate self.fft_size = fft_size - self.filename = filename self.bg_color = bg_color self.color_scheme = color_scheme @@ -205,13 +203,13 @@ class WaveformImage(object): def draw_anti_aliased_pixels(self, x, y1, y2, color): """ vertical anti-aliasing at y1 and y2 """ - + y_max = max(y1, y2) y_max_int = int(y_max) alpha = y_max - y_max_int if alpha > 0.0 and alpha < 1.0 and y_max_int + 1 < self.image_height: - current_pix = self.pixel[x, y_max_int + 1] + current_pix = self.pixel[int(x), y_max_int + 1] r = int((1-alpha)*current_pix[0] + alpha*color[0]) g = int((1-alpha)*current_pix[1] + alpha*color[1]) @@ -243,123 +241,26 @@ class WaveformImage(object): self.draw_peaks(self.pixel_cursor, peaks, spectral_centroid) self.pixel_cursor += 1 - def save(self): + def save(self, filename): """ Apply last 2D transforms and write all pixels to the file. 
""" # middle line (0 for none) a = 1 - - for x in range(self.image_width): - self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2])) - self.image.save(self.filename) - - -class WaveformImageJoyContour(WaveformImage): - - def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=None): - WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=filename) - self.contour = numpy.zeros(self.image_width) - self.centroids = numpy.zeros(self.image_width) - self.ndiv = 4 - self.x = numpy.r_[0:self.image_width-1:1] - self.dx1 = self.x[1]-self.x[0] - - def get_peaks_contour(self, x, peaks, spectral_centroid=None): - self.contour[x] = numpy.max(peaks) - self.centroids[x] = spectral_centroid - - def mean(self, samples): - return numpy.mean(samples) - - def normalize(self, contour): - contour = contour-min(contour) - return contour/max(contour) - - def draw_peaks_contour(self): - contour = self.contour.copy() - - # Smoothing - contour = smooth(contour, window_len=16) - - # Normalize - contour = self.normalize(contour) - - # Scaling - #ratio = numpy.mean(contour)/numpy.sqrt(2) - ratio = 1 - contour = self.normalize(numpy.expm1(contour/ratio)) - - # Spline - #contour = cspline1d(contour) - #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0]) - - # Multicurve rotating - for i in range(0,self.ndiv): - self.previous_x, self.previous_y = None, None - - #bright_color = 255 - bright_color = int(255*(1-float(i)/(self.ndiv*2))) - line_color = (bright_color,bright_color,bright_color) - - # Linear - #contour = contour*(1.0-float(i)/self.ndiv) - #contour = contour*(1-float(i)/self.ndiv) - - # Cosine - contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi - #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv) - - # Negative Sine - #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv)) - - for j in range(0,self.image_width-1): - #line_color = self.color_lookup[int(self.centroids[j]*255.0)] - x = self.x[j] - y = contour[j]*(self.image_height-2)/2+self.image_height/2 - if self.previous_y: - self.draw.line([self.previous_x, self.previous_y, x, y], line_color) - self.draw.line([self.previous_x, -self.previous_y+self.image_height, x, -y+self.image_height], line_color) - else: - self.draw.point((x, y), line_color) - self.draw_anti_aliased_pixels(x, y, y, line_color) - self.draw_anti_aliased_pixels(x, -y+self.image_height, -y+self.image_height, line_color) - self.previous_x, self.previous_y = x, y - - def process(self, frames, eod): - if len(frames) != 1: - buffer = frames[:,0].copy() - buffer.shape = (len(buffer),1) - for samples, end in self.pixels_adapter.process(buffer, eod): - if self.pixel_cursor < self.image_width: - #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True) - peaks = self.peaks(samples) - self.get_peaks_contour(self.pixel_cursor, peaks) - self.pixel_cursor += 1 - if eod: - self.draw_peaks_contour() - - def save(self): - """ Apply last 2D transforms and write all pixels to the file. 
""" - # middle line (0 for none) - a = 1 - for x in range(self.image_width): self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2])) -# self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM) - self.image.save(self.filename) + self.image.save(filename) class SpectrogramImage(object): """ Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time). Adds pixels iteratively thanks to the adapter providing fixed size frame buffers.""" - def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color=None, color_scheme='default', filename=None): + def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color=None, color_scheme='default'): self.image_width = image_width self.image_height = image_height self.nframes = nframes self.samplerate = samplerate self.fft_size = fft_size - self.filename = filename self.color_scheme = color_scheme if isinstance(color_scheme, dict): @@ -419,10 +320,10 @@ class SpectrogramImage(object): self.draw_spectrum(self.pixel_cursor, db_spectrum) self.pixel_cursor += 1 - def save(self): + def save(self, filename): """ Apply last 2D transforms and write all pixels to the file. """ self.image.putdata(self.pixels) - self.image.transpose(Image.ROTATE_90).save(self.filename) + self.image.transpose(Image.ROTATE_90).save(filename) class Noise(object): @@ -519,12 +420,12 @@ def smooth(x, window_len=10, window='hanning'): raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'" s=numpy.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]] - #print(len(s)) if window == 'flat': #moving average w = numpy.ones(window_len,'d') else: w = getattr(numpy, window)(window_len) + y = numpy.convolve(w/w.sum(), s, mode='same') return y[window_len-1:-window_len+1] diff --git a/timeside/grapher/spectrogram.py b/timeside/grapher/spectrogram.py index de898b8..5d53331 100644 --- a/timeside/grapher/spectrogram.py +++ b/timeside/grapher/spectrogram.py @@ -30,12 +30,11 @@ class Spectrogram(Processor): FFT_SIZE = 0x400 @interfacedoc - def __init__(self, width=1024, height=256, output=None, bg_color=(0,0,0), color_scheme='default'): + def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'): self.width = width self.height = height self.bg_color = bg_color self.color_scheme = color_scheme - self.filename = output self.graph = None @staticmethod @@ -59,7 +58,7 @@ class Spectrogram(Processor): if self.graph: self.graph = None self.graph = SpectrogramImage(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE, - bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename) + bg_color=self.bg_color, color_scheme=self.color_scheme) @interfacedoc def process(self, frames, eod=False): @@ -67,8 +66,8 @@ class Spectrogram(Processor): return frames, eod @interfacedoc - def render(self): - if self.filename: - self.graph.save() + def render(self, output=None): + if output: + self.graph.save(output) return self.graph.image - + diff --git a/timeside/grapher/waveform.py b/timeside/grapher/waveform.py index 0f78054..486802f 100644 --- a/timeside/grapher/waveform.py +++ b/timeside/grapher/waveform.py @@ -30,12 +30,11 @@ class Waveform(Processor): FFT_SIZE = 0x400 @interfacedoc - def __init__(self, width=1024, height=256, output=None, bg_color=(0,0,0), color_scheme='default'): + def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'): self.width = width 
self.height = height self.bg_color = bg_color self.color_scheme = color_scheme - self.filename = output self.graph = None @staticmethod @@ -59,7 +58,7 @@ class Waveform(Processor): if self.graph: self.graph = None self.graph = WaveformImage(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE, - bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename) + bg_color=self.bg_color, color_scheme=self.color_scheme) @interfacedoc def process(self, frames, eod=False): @@ -67,7 +66,7 @@ class Waveform(Processor): return frames, eod @interfacedoc - def render(self): - if self.filename: - self.graph.save() + def render(self, output=None): + if output: + self.graph.save(output) return self.graph.image diff --git a/timeside/grapher/waveform_joy.py b/timeside/grapher/waveform_joy.py index 31edf48..1a6ede3 100644 --- a/timeside/grapher/waveform_joy.py +++ b/timeside/grapher/waveform_joy.py @@ -23,6 +23,116 @@ from timeside.core import Processor, implements, interfacedoc, FixedSizeInputAda from timeside.api import IGrapher from timeside.grapher.core import * +class WaveformImageJoyContour(WaveformImage): + + def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, ndiv=1, symetry=None): + WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme) + self.contour = numpy.zeros(self.image_width) + self.centroids = numpy.zeros(self.image_width) + self.ndiv = ndiv + self.x = numpy.r_[0:self.image_width-1:1] + self.dx1 = self.x[1]-self.x[0] + self.symetry = symetry + + def get_peaks_contour(self, x, peaks, spectral_centroid=None): + self.contour[x] = numpy.max(peaks) + self.centroids[x] = spectral_centroid + + def mean(self, samples): + return numpy.mean(samples) + + def normalize(self, contour): + contour = contour-min(contour) + return contour/max(contour) + + def draw_peaks_contour(self): + contour = self.contour.copy() + + # Smoothing + contour = smooth(contour, window_len=16) + + # Normalize + contour = self.normalize(contour) + + # Scaling + #ratio = numpy.mean(contour)/numpy.sqrt(2) + ratio = 1 + contour = self.normalize(numpy.expm1(contour/ratio))*(1-10**-6) + + # Spline + #contour = cspline1d(contour) + #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0]) + + if self.symetry: + height = int(self.image_height/2) + else: + height = self.image_height + + # Multicurve rotating + for i in range(0,self.ndiv): + self.previous_x, self.previous_y = None, None + + #bright_color = 255 + bright_color = int(255*(1-float(i)/(self.ndiv*2))) +# bright_color = 255-bright_color+150 +# line_color = self.color_lookup[int(self.centroids[j]*255.0)] + line_color = (bright_color,bright_color,bright_color) + + # Linear + #contour = contour*(1.0-float(i)/self.ndiv) + #contour = contour*(1-float(i)/self.ndiv) + + # Cosine + contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi + #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv) + + # Negative Sine + #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv)) + + curve = (height-1)*contour +# curve = contour*(height-2)/2+height/2 + + for x in self.x: + x = int(x) + y = curve[x] + if not x == 0: + if not self.symetry: + self.draw.line([self.previous_x, self.previous_y, x, y], line_color) + self.draw_anti_aliased_pixels(x, y, y, line_color) + else: + self.draw.line([self.previous_x, self.previous_y+height, x, y+height], line_color) + 
self.draw_anti_aliased_pixels(x, y+height, y+height, line_color) + self.draw.line([self.previous_x, -self.previous_y+height, x, -y+height], line_color) + self.draw_anti_aliased_pixels(x, -y+height, -y+height, line_color) + else: + if not self.symetry: + self.draw.point((x, y), line_color) + else: + self.draw.point((x, y+height), line_color) + self.previous_x, self.previous_y = x, y + + def process(self, frames, eod): + if len(frames) != 1: + buffer = frames[:,0].copy() + buffer.shape = (len(buffer),1) + for samples, end in self.pixels_adapter.process(buffer, eod): + if self.pixel_cursor < self.image_width: + #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True) + peaks = self.peaks(samples) + self.get_peaks_contour(self.pixel_cursor, peaks) + self.pixel_cursor += 1 + if eod: + self.draw_peaks_contour() + + def save(self, filename): + """ Apply last 2D transforms and write all pixels to the file. """ + # middle line (0 for none) + a = 1 + for x in range(self.image_width): + self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2])) + self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM) + self.image.save(filename) + class WaveformJoyDiv(Processor): implements(IGrapher) @@ -30,13 +140,14 @@ class WaveformJoyDiv(Processor): FFT_SIZE = 0x400 @interfacedoc - def __init__(self, width=1024, height=256, output=None, bg_color=(136,136,136), color_scheme='default'): + def __init__(self, width=1024, height=256, bg_color=(136,136,136), color_scheme='default'): self.width = width self.height = height self.bg_color = bg_color self.color_scheme = color_scheme - self.filename = output self.graph = None + self.ndiv = 4 + self.symetry = False @staticmethod @interfacedoc @@ -59,7 +170,7 @@ class WaveformJoyDiv(Processor): if self.graph: self.graph = None self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE, - bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename) + bg_color=self.bg_color, color_scheme=self.color_scheme, ndiv=self.ndiv, symetry=self.symetry) @interfacedoc def process(self, frames, eod=False): @@ -67,7 +178,7 @@ class WaveformJoyDiv(Processor): return frames, eod @interfacedoc - def render(self): - if self.filename: - self.graph.save() + def render(self, output): + if output: + self.graph.save(output) return self.graph.image diff --git a/timeside/tools/waveform_batch.py b/timeside/tools/waveform_batch.py index c5baaf5..b0c2208 100644 --- a/timeside/tools/waveform_batch.py +++ b/timeside/tools/waveform_batch.py @@ -42,7 +42,7 @@ class GrapherScheme: self.width = 655 # Height of the image - self.height = 95 + self.height = 96 # Background color self.bg_color = (136,136,136) @@ -65,7 +65,7 @@ class Media2Waveform(object): self.media_list = self.get_media_list() if not os.path.exists(self.img_dir): - os.mkdir(self.img_dir) + os.makedirs(self.img_dir) self.path_dict = self.get_path_dict() def get_media_list(self): @@ -92,7 +92,7 @@ class Media2Waveform(object): audio = os.path.join(os.path.dirname(__file__), source) decoder = timeside.decoder.FileDecoder(audio) analyzer = timeside.analyzer.Duration() - waveform = timeside.grapher.WaveformJoyDiv(width=self.width, height=self.height, output=image, + waveform = timeside.grapher.WaveformJoyDiv(width=self.width, height=self.height, bg_color=self.bg_color, color_scheme=self.color_scheme) (decoder | analyzer | waveform).run() duration = analyzer.result() @@ -102,7 +102,7 @@ class Media2Waveform(object): 
waveform.graph.filename = image print 'Rendering ', source, ' to ', waveform.graph.filename, '...' print 'frames per pixel = ', waveform.graph.samples_per_pixel - waveform.render() + waveform.render(output=image) if __name__ == '__main__': -- 2.39.5
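
Usage note: a minimal sketch of the new grapher API introduced by this patch. The pipeline and keyword arguments come from the diff above (FileDecoder, WaveformJoyDiv, render(output=...)); the file paths are only illustrative and are not part of the patch.

    import timeside

    decoder = timeside.decoder.FileDecoder('/path/to/source.wav')
    grapher = timeside.grapher.WaveformJoyDiv(width=1024, height=256,
                                              bg_color=(136, 136, 136),
                                              color_scheme='default')
    # Run the processing pipeline; the grapher accumulates pixels frame by frame.
    (decoder | grapher).run()

    # The output file is no longer passed to the processor constructor;
    # it is given to render(), which forwards it to the image save() method
    # and still returns the PIL Image object.
    image = grapher.render(output='/path/to/waveform.png')

The same render(output=...) call applies to Waveform and Spectrogram, whose render() signatures now default output to None and skip writing when no output is given.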