import optparse, math, sys
import ImageFilter, ImageChops, Image, ImageDraw, ImageColor
import numpy
+from scipy.signal import cspline1d, cspline1d_eval
from timeside.core import FixedSizeInputAdapter
line_color = self.color_lookup[int(spectral_centroid*255.0)]
- if self.previous_y != None:
+ if self.previous_y:
self.draw.line([self.previous_x, self.previous_y, x, y1, x, y2], line_color)
else:
self.draw.line([x, y1, x, y2], line_color)
self.image.save(self.filename)
+class WaveformImageJoyContour(WaveformImage):
+
+ def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=None):
+ WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=filename)
+ self.contour = numpy.zeros(self.image_width)
+ self.centroids = numpy.zeros(self.image_width)
+ self.ndiv = 6
+ self.x = numpy.r_[0:self.image_width-1:1]
+ #self.dx1 = self.x[1]-self.x[0]
+ self.dx2 = self.x[self.samples_per_pixel/(self.ndiv*10)]-self.x[0]
+
+ def get_peaks_contour(self, x, peaks, spectral_centroid=None):
+ """ draw 2 peaks at x using the spectral_centroid for color """
+ self.contour[x] = numpy.max(peaks)
+ self.centroids[x] = spectral_centroid
+
+ def draw_peaks_contour(self):
+ contour = cspline1d(self.contour.copy())
+ #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
+ contour = cspline1d_eval(contour, self.x, dx=self.dx2, x0=self.x[0])
+ #print len(contour)
+
+ l_min = min(self.contour)
+ l_max = max(self.contour)
+ l_range= l_max - l_min
+
+ self.contour = (contour-l_min)/l_range
+ #print contour
+
+ # Multispline scales
+ for i in range(0,self.ndiv):
+ self.previous_x, self.previous_y = None, None
+ bright_color = int(255*(1-float(i)/self.ndiv))
+ line_color = (bright_color,bright_color,bright_color)
+ print line_color
+
+ # Linear
+ #contour = contour*(1.0-float(i)/self.ndiv)
+ #contour = contour*(1-float(i)/self.ndiv)
+
+ # Cosine
+ contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
+ #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
+
+ # Negative Sine
+ #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
+
+ for j in range(0,self.image_width-1):
+ #line_color = self.color_lookup[int(self.centroids[j]*255.0)]
+ x = self.x[j]
+ y = contour[j]*self.image_height
+ #print y
+ if self.previous_y:
+ self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
+ self.draw_anti_aliased_pixels(x, y, y, line_color)
+ else:
+ self.draw.point((x, y), line_color)
+ self.previous_x, self.previous_y = x, y
+
+ def process(self, frames, eod):
+ if len(frames) != 1:
+ buffer = frames[:,0].copy()
+ buffer.shape = (len(buffer),1)
+ for samples, end in self.pixels_adapter.process(buffer, eod):
+ if self.pixel_cursor < self.image_width:
+ #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True)
+ peaks = self.peaks(samples)
+ self.get_peaks_contour(self.pixel_cursor, peaks)
+ self.pixel_cursor += 1
+ if eod:
+ self.draw_peaks_contour()
+
+ def save(self):
+ """ Apply last 2D transforms and write all pixels to the file. """
+
+ # middle line (0 for none)
+ a = 1
+
+ for x in range(self.image_width):
+ self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
+ self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
+ self.image.save(self.filename)
+
+
class SpectrogramImage(object):
""" Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time).
Adds pixels iteratively thanks to the adapter providing fixed size frame buffers."""
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2007-2010 Guillaume Pellerin <yomguy@parisson.com>
+# Copyright (c) 2010 Olivier Guilyardi <olivier@samalyse.com>
+
+# This file is part of TimeSide.
+
+# TimeSide is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+
+# TimeSide is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
+
+
+from timeside.core import Processor, implements, interfacedoc, FixedSizeInputAdapter
+from timeside.api import IGrapher
+from timeside.grapher.core import *
+
+
+class WaveformJoyDiv(Processor):
+    """ TimeSide grapher processor rendering a "Joy Division"-style waveform
+    image via WaveformImageJoyContour. """
+
+    implements(IGrapher)
+
+    # FFT window size (1024 samples) handed to the image builder
+    FFT_SIZE = 0x400
+
+    @interfacedoc
+    def __init__(self, width=1024, height=256, output=None, bg_color=(0,0,0), color_scheme='default'):
+        self.width = width
+        self.height = height
+        self.bg_color = bg_color
+        self.color_scheme = color_scheme
+        # destination file path; may be None (see render())
+        self.filename = output
+        # image builder, created lazily in setup()
+        self.graph = None
+
+    @staticmethod
+    @interfacedoc
+    def id():
+        return "waveform_joydiv"
+
+    @staticmethod
+    @interfacedoc
+    def name():
+        return "Waveform JoyDiv"
+
+    @interfacedoc
+    def set_colors(self, background, scheme):
+        self.bg_color = background
+        self.color_scheme = scheme
+
+    @interfacedoc
+    def setup(self, channels=None, samplerate=None, nframes=None):
+        super(WaveformJoyDiv, self).setup(channels, samplerate, nframes)
+        # discard any graph from a previous run so each setup starts blank
+        if self.graph:
+            self.graph = None
+        self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
+                                             bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename)
+
+    @interfacedoc
+    def process(self, frames, eod=False):
+        # delegate drawing to the image builder; pass frames through unchanged
+        self.graph.process(frames, eod)
+        return frames, eod
+
+    @interfacedoc
+    def render(self):
+        # write the image to disk only when an output filename was given
+        if self.filename:
+            self.graph.save()
+        return self.graph.image
]}
# Width of the image
- self.width = 2048
+ self.width = 1024
# Height of the image
- self.height = 128
+ self.height = 320
# Background color
- self.bg_color = (255,255,255)
+ self.bg_color = (25,25,25)
# Force computation. By default, the class doesn't overwrite existing image files.
- self.force = False
+ self.force = True
class Media2Waveform(object):
if root:
for file in files:
ext = file.split('.')[-1]
- if ext == 'mp3' or ext == 'MP3':
+ if ext == 'wav' or ext == 'WAV':
media_list.append(root+os.sep+file)
return media_list
for media in self.media_list:
name = os.path.splitext(media)
name = name[0].split(os.sep)[-1]
- path_dict[media] = self.img_dir + os.sep + name + '.png'
+ path_dict[media] = unicode(self.img_dir + os.sep + name + '.png')
return path_dict
def process(self):
print 'Rendering ', source, ' to ', image, '...'
audio = os.path.join(os.path.dirname(__file__), source)
decoder = timeside.decoder.FileDecoder(audio)
- waveform = timeside.grapher.Waveform(width=self.width, height=self.height, output=image,
+ waveform = timeside.grapher.WaveformJoyDiv(width=self.width, height=self.height, output=image,
bg_color=self.bg_color, color_scheme=self.color_scheme)
(decoder | waveform).run()
print 'frames per pixel = ', waveform.graph.samples_per_pixel