From: yomguy
Date: Wed, 25 Aug 2010 12:27:30 +0000 (+0000)
Subject: add spline joy_division style waveform grapher
X-Git-Tag: 0.3.2~118
X-Git-Url: https://git.parisson.com/?a=commitdiff_plain;h=2b281ab4f230f347b347f092a06f63f0a1cfa10f;p=timeside.git

add spline joy_division style waveform grapher
---

diff --git a/timeside/grapher/__init__.py b/timeside/grapher/__init__.py
index ce2ebca..d8ed293 100644
--- a/timeside/grapher/__init__.py
+++ b/timeside/grapher/__init__.py
@@ -2,4 +2,5 @@
 
 from core import *
 from waveform import *
+from waveform_joy import *
 from spectrogram import *
diff --git a/timeside/grapher/core.py b/timeside/grapher/core.py
index b5f122a..8577871 100644
--- a/timeside/grapher/core.py
+++ b/timeside/grapher/core.py
@@ -26,6 +26,7 @@
 import optparse, math, sys
 import ImageFilter, ImageChops, Image, ImageDraw, ImageColor
 import numpy
+from scipy.signal import cspline1d, cspline1d_eval
 
 from timeside.core import FixedSizeInputAdapter
 
@@ -193,7 +194,7 @@ class WaveformImage(object):
 
         line_color = self.color_lookup[int(spectral_centroid*255.0)]
 
-        if self.previous_y != None:
+        if self.previous_y:
             self.draw.line([self.previous_x, self.previous_y, x, y1, x, y2], line_color)
         else:
             self.draw.line([x, y1, x, y2], line_color)
@@ -253,6 +254,90 @@ class WaveformImage(object):
 
         self.image.save(self.filename)
 
+class WaveformImageJoyContour(WaveformImage):
+
+    def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=None):
+        WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=filename)
+        self.contour = numpy.zeros(self.image_width)
+        self.centroids = numpy.zeros(self.image_width)
+        self.ndiv = 6
+        self.x = numpy.r_[0:self.image_width-1:1]
+        #self.dx1 = self.x[1]-self.x[0]
+        self.dx2 = self.x[self.samples_per_pixel/(self.ndiv*10)]-self.x[0]
+
+    def get_peaks_contour(self, x, peaks, spectral_centroid=None):
+        """ draw 2 peaks at x using the spectral_centroid for color """
+        self.contour[x] = numpy.max(peaks)
+        self.centroids[x] = spectral_centroid
+
+    def draw_peaks_contour(self):
+        contour = cspline1d(self.contour.copy())
+        #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
+        contour = cspline1d_eval(contour, self.x, dx=self.dx2, x0=self.x[0])
+        #print len(contour)
+
+        l_min = min(self.contour)
+        l_max = max(self.contour)
+        l_range = l_max - l_min
+
+        self.contour = (contour-l_min)/l_range
+        #print contour
+
+        # Multispline scales
+        for i in range(0, self.ndiv):
+            self.previous_x, self.previous_y = None, None
+            bright_color = int(255*(1-float(i)/self.ndiv))
+            line_color = (bright_color, bright_color, bright_color)
+            print line_color
+
+            # Linear
+            #contour = contour*(1.0-float(i)/self.ndiv)
+            #contour = contour*(1-float(i)/self.ndiv)
+
+            # Cosine
+            contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
+            #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
+
+            # Negative Sine
+            #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
+
+            for j in range(0, self.image_width-1):
+                #line_color = self.color_lookup[int(self.centroids[j]*255.0)]
+                x = self.x[j]
+                y = contour[j]*self.image_height
+                #print y
+                if self.previous_y:
+                    self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
+                    self.draw_anti_aliased_pixels(x, y, y, line_color)
+                else:
+                    self.draw.point((x, y), line_color)
+                self.previous_x, self.previous_y = x, y
+
+    def process(self, frames, eod):
+        if len(frames) != 1:
+            buffer = frames[:,0].copy()
+            buffer.shape = (len(buffer),1)
+            for samples, end in self.pixels_adapter.process(buffer, eod):
+                if self.pixel_cursor < self.image_width:
+                    #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True)
+                    peaks = self.peaks(samples)
+                    self.get_peaks_contour(self.pixel_cursor, peaks)
+                    self.pixel_cursor += 1
+        if eod:
+            self.draw_peaks_contour()
+
+    def save(self):
+        """ Apply last 2D transforms and write all pixels to the file. """
+
+        # middle line (0 for none)
+        a = 1
+
+        for x in range(self.image_width):
+            self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
+        self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
+        self.image.save(self.filename)
+
+
 class SpectrogramImage(object):
     """ Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time).
     Adds pixels iteratively thanks to the adapter providing fixed size frame buffers."""
diff --git a/timeside/grapher/waveform_joy.py b/timeside/grapher/waveform_joy.py
new file mode 100644
index 0000000..2289a9f
--- /dev/null
+++ b/timeside/grapher/waveform_joy.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2007-2010 Guillaume Pellerin
+# Copyright (c) 2010 Olivier Guilyardi
+
+# This file is part of TimeSide.
+
+# TimeSide is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+
+# TimeSide is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
+
+
+from timeside.core import Processor, implements, interfacedoc, FixedSizeInputAdapter
+from timeside.api import IGrapher
+from timeside.grapher.core import *
+
+
+class WaveformJoyDiv(Processor):
+    implements(IGrapher)
+
+    FFT_SIZE = 0x400
+
+    @interfacedoc
+    def __init__(self, width=1024, height=256, output=None, bg_color=(0,0,0), color_scheme='default'):
+        self.width = width
+        self.height = height
+        self.bg_color = bg_color
+        self.color_scheme = color_scheme
+        self.filename = output
+        self.graph = None
+
+    @staticmethod
+    @interfacedoc
+    def id():
+        return "waveform_joydiv"
+
+    @staticmethod
+    @interfacedoc
+    def name():
+        return "Waveform JoyDiv"
+
+    @interfacedoc
+    def set_colors(self, background, scheme):
+        self.bg_color = background
+        self.color_scheme = scheme
+
+    @interfacedoc
+    def setup(self, channels=None, samplerate=None, nframes=None):
+        super(WaveformJoyDiv, self).setup(channels, samplerate, nframes)
+        if self.graph:
+            self.graph = None
+        self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
+                                             bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename)
+
+    @interfacedoc
+    def process(self, frames, eod=False):
+        self.graph.process(frames, eod)
+        return frames, eod
+
+    @interfacedoc
+    def render(self):
+        if self.filename:
+            self.graph.save()
+        return self.graph.image
diff --git a/timeside/tools/waveform_batch b/timeside/tools/waveform_batch
index 618aea7..ea433c9 100644
--- a/timeside/tools/waveform_batch
+++ b/timeside/tools/waveform_batch
@@ -39,16 +39,16 @@ class GrapherScheme:
                 ]}
 
         # Width of the image
-        self.width = 2048
+        self.width = 1024
 
         # Height of the image
-        self.height = 128
+        self.height = 320
 
         # Background color
-        self.bg_color = (255,255,255)
+        self.bg_color = (25,25,25)
 
        # Force computation. By default, the class doesn't overwrite existing image files.
-        self.force = False
+        self.force = True
 
 
 class Media2Waveform(object):
@@ -74,7 +74,7 @@ class Media2Waveform(object):
            if root:
                for file in files:
                    ext = file.split('.')[-1]
-                    if ext == 'mp3' or ext == 'MP3':
+                    if ext == 'wav' or ext == 'WAV':
                        media_list.append(root+os.sep+file)
        return media_list
 
@@ -83,7 +83,7 @@ class Media2Waveform(object):
        for media in self.media_list:
            name = os.path.splitext(media)
            name = name[0].split(os.sep)[-1]
-            path_dict[media] = self.img_dir + os.sep + name + '.png'
+            path_dict[media] = unicode(self.img_dir + os.sep + name + '.png')
        return path_dict
 
    def process(self):
@@ -92,7 +92,7 @@ class Media2Waveform(object):
            print 'Rendering ', source, ' to ', image, '...'
            audio = os.path.join(os.path.dirname(__file__), source)
            decoder  = timeside.decoder.FileDecoder(audio)
-            waveform = timeside.grapher.Waveform(width=self.width, height=self.height, output=image,
+            waveform = timeside.grapher.WaveformJoyDiv(width=self.width, height=self.height, output=image,
                                                 bg_color=self.bg_color, color_scheme=self.color_scheme)
            (decoder | waveform).run()
            print 'frames per pixel = ', waveform.graph.samples_per_pixel
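
Usage sketch: the new grapher plugs into the standard decoder pipeline exactly as timeside/tools/waveform_batch does above. The audio and image paths below are placeholders, not files from this repository; it assumes a TimeSide checkout at this revision.

    import timeside

    # Placeholder paths -- substitute real files.
    audio = '/path/to/audio.wav'
    image = '/path/to/waveform.png'

    decoder  = timeside.decoder.FileDecoder(audio)
    waveform = timeside.grapher.WaveformJoyDiv(width=1024, height=320, output=image,
                                               bg_color=(25,25,25), color_scheme='default')
    (decoder | waveform).run()   # stream frames through the grapher
    waveform.render()            # writes the PNG since an output path was given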