'''The setup and build script for the TimeSide library.'''
__author__ = 'yomguy@parisson.com'
-__version__ = '0.1-beta'
+__version__ = '0.2'
# The base package metadata to be used by both distutils and setuptools
import timeside.encoder
import timeside.grapher
import timeside.analyzer
-import timeside.tests
-from timeside.core import *
+#import timeside.tests
+
+
"""Set the colors used for image generation. background is a RGB tuple,
and scheme a a predefined color theme name"""
- def render(self):
+ def render(self, output=None):
"""Return a PIL Image object visually representing all of the data passed
- by repeatedly calling process()"""
+ by repeatedly calling process(), and write the image to output if specified"""
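#
# A minimal usage sketch of the updated grapher API (the 'sweep.wav' and
# 'waveform.png' paths are hypothetical; FileDecoder and WaveformJoyDiv are
# defined elsewhere in this patch):
#
#   decoder = timeside.decoder.FileDecoder('sweep.wav')
#   grapher = timeside.grapher.WaveformJoyDiv(width=1024, height=256)
#   (decoder | grapher).run()
#   image = grapher.render(output='waveform.png')   # writes the file, returns the PIL image
#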
class IAnalyzer(IProcessor):
"""Media item analyzer driver interface. This interface is abstract, it doesn't
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
-# This file defines a generic object interface mechanism and
+# This file defines a generic object interface mechanism and
# a way to determine which components implement a given interface.
#
# For example, the following defines the Music class as implementing the
# Listenable interface:
#
# class Music(Component):
# implements(Listenable)
#
-# Several class can implements a such interface, and it is possible to
+# Several classes can implement such an interface, and it is possible to
# discover which classes implement it with implementations():
#
# list_of_classes = implementations(Listenable)
#
-# This mechanism support inheritance of interfaces: a class implementing a given
+# This mechanism supports inheritance of interfaces: a class implementing a given
# interface is also considered to implement all the ascendants of this interface.
#
-# However, inheritance is not supported for components. The descendants of a class
-# implementing a given interface are not automatically considered to implement this
-# interface too.
+# However, inheritance is not supported for components. The descendants of a class
+# implementing a given interface are not automatically considered to implement this
+# interface too.
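+#
+# An illustrative sketch of both rules (the Playable, Radio and Jukebox names
+# below are hypothetical, reusing the Music/Listenable example above):
+#
+# class Playable(Listenable):      # a descendant interface of Listenable
+#     pass
+#
+# class Jukebox(Component):
+#     implements(Playable)
+#
+# class Radio(Music):              # a plain subclass of the Music component
+#     pass
+#
+# implementations(Listenable)      # -> [Music, Jukebox]  (Playable is a descendant
+#                                  #    of Listenable), but not Radio: component
+#                                  #    subclasses are not registered automatically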
-__all__ = ['Component', 'MetaComponent', 'implements', 'abstract',
+__all__ = ['Component', 'MetaComponent', 'implements', 'abstract',
'interfacedoc', 'Interface', 'implementations', 'ComponentError']
class Interface(object):
MetaComponent.abstract = True
def implementations(interface, recurse=True, abstract=False):
- """Returns the components implementing interface, and if recurse, any of
- the descendants of interface. If abstract is True, also return the
+ """Returns the components implementing interface, and if recurse, any of
+ the descendants of interface. If abstract is True, also return the
abstract implementations."""
result = []
find_implementations(interface, recurse, abstract, result)
if MetaComponent.implements:
for i in MetaComponent.implements:
MetaComponent.implementations.append({
- 'interface': i,
+ 'interface': i,
'class': new_class,
'abstract': MetaComponent.abstract})
raise ComponentError("@interfacedoc: %s.%s: no such member in implemented interfaces: %s"
% (new_class.__name__, name, str(MetaComponent.implements)))
member.__doc__ = if_member.__doc__
-
+
MetaComponent.implements = []
MetaComponent.abstract = False
from core import *
from waveform import *
-from waveform_joy import *
from spectrogram import *
+#from waveform_joy import *
import optparse, math, sys
import ImageFilter, ImageChops, Image, ImageDraw, ImageColor
import numpy
-from scipy.signal import cspline1d, cspline1d_eval
from timeside.core import FixedSizeInputAdapter
Adds pixels iteratively, using an adapter that provides fixed-size frame buffers.
Peaks are colored relative to the spectral centroid of each frame packet. """
- def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=None):
+ def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme):
self.image_width = image_width
self.image_height = image_height
self.nframes = nframes
self.samplerate = samplerate
self.fft_size = fft_size
- self.filename = filename
self.bg_color = bg_color
self.color_scheme = color_scheme
def draw_anti_aliased_pixels(self, x, y1, y2, color):
""" vertical anti-aliasing at y1 and y2 """
-
+
y_max = max(y1, y2)
y_max_int = int(y_max)
alpha = y_max - y_max_int
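# alpha is the fractional coverage of the pixel row just past the peak tip;
# that pixel is blended between its current color and the peak color in this
# proportion, softening the vertical edge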
if alpha > 0.0 and alpha < 1.0 and y_max_int + 1 < self.image_height:
- current_pix = self.pixel[x, y_max_int + 1]
+ current_pix = self.pixel[int(x), y_max_int + 1]
r = int((1-alpha)*current_pix[0] + alpha*color[0])
g = int((1-alpha)*current_pix[1] + alpha*color[1])
self.draw_peaks(self.pixel_cursor, peaks, spectral_centroid)
self.pixel_cursor += 1
- def save(self):
+ def save(self, filename):
""" Apply last 2D transforms and write all pixels to the file. """
# offset added to the middle line pixels (set to 0 for no middle line)
a = 1
-
- for x in range(self.image_width):
- self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
- self.image.save(self.filename)
-
-
-class WaveformImageJoyContour(WaveformImage):
-
- def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=None):
- WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, filename=filename)
- self.contour = numpy.zeros(self.image_width)
- self.centroids = numpy.zeros(self.image_width)
- self.ndiv = 4
- self.x = numpy.r_[0:self.image_width-1:1]
- self.dx1 = self.x[1]-self.x[0]
-
- def get_peaks_contour(self, x, peaks, spectral_centroid=None):
- self.contour[x] = numpy.max(peaks)
- self.centroids[x] = spectral_centroid
-
- def mean(self, samples):
- return numpy.mean(samples)
-
- def normalize(self, contour):
- contour = contour-min(contour)
- return contour/max(contour)
-
- def draw_peaks_contour(self):
- contour = self.contour.copy()
-
- # Smoothing
- contour = smooth(contour, window_len=16)
-
- # Normalize
- contour = self.normalize(contour)
-
- # Scaling
- #ratio = numpy.mean(contour)/numpy.sqrt(2)
- ratio = 1
- contour = self.normalize(numpy.expm1(contour/ratio))
-
- # Spline
- #contour = cspline1d(contour)
- #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
-
- # Multicurve rotating
- for i in range(0,self.ndiv):
- self.previous_x, self.previous_y = None, None
-
- #bright_color = 255
- bright_color = int(255*(1-float(i)/(self.ndiv*2)))
- line_color = (bright_color,bright_color,bright_color)
-
- # Linear
- #contour = contour*(1.0-float(i)/self.ndiv)
- #contour = contour*(1-float(i)/self.ndiv)
-
- # Cosine
- contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
- #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
-
- # Negative Sine
- #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
-
- for j in range(0,self.image_width-1):
- #line_color = self.color_lookup[int(self.centroids[j]*255.0)]
- x = self.x[j]
- y = contour[j]*(self.image_height-2)/2+self.image_height/2
- if self.previous_y:
- self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
- self.draw.line([self.previous_x, -self.previous_y+self.image_height, x, -y+self.image_height], line_color)
- else:
- self.draw.point((x, y), line_color)
- self.draw_anti_aliased_pixels(x, y, y, line_color)
- self.draw_anti_aliased_pixels(x, -y+self.image_height, -y+self.image_height, line_color)
- self.previous_x, self.previous_y = x, y
-
- def process(self, frames, eod):
- if len(frames) != 1:
- buffer = frames[:,0].copy()
- buffer.shape = (len(buffer),1)
- for samples, end in self.pixels_adapter.process(buffer, eod):
- if self.pixel_cursor < self.image_width:
- #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True)
- peaks = self.peaks(samples)
- self.get_peaks_contour(self.pixel_cursor, peaks)
- self.pixel_cursor += 1
- if eod:
- self.draw_peaks_contour()
-
- def save(self):
- """ Apply last 2D transforms and write all pixels to the file. """
- # middle line (0 for none)
- a = 1
-
for x in range(self.image_width):
self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
-# self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
- self.image.save(self.filename)
+ self.image.save(filename)
class SpectrogramImage(object):
""" Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time).
Adds pixels iteratively, using an adapter that provides fixed-size frame buffers."""
- def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color=None, color_scheme='default', filename=None):
+ def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color=None, color_scheme='default'):
self.image_width = image_width
self.image_height = image_height
self.nframes = nframes
self.samplerate = samplerate
self.fft_size = fft_size
- self.filename = filename
self.color_scheme = color_scheme
if isinstance(color_scheme, dict):
self.draw_spectrum(self.pixel_cursor, db_spectrum)
self.pixel_cursor += 1
- def save(self):
+ def save(self, filename):
""" Apply last 2D transforms and write all pixels to the file. """
self.image.putdata(self.pixels)
- self.image.transpose(Image.ROTATE_90).save(self.filename)
+ self.image.transpose(Image.ROTATE_90).save(filename)
class Noise(object):
raise ValueError, "window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
s=numpy.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]
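# the signal is extended with mirrored copies of itself at both ends so the
# convolution below does not shrink or distort the curve near the borders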
- #print(len(s))
if window == 'flat': #moving average
w = numpy.ones(window_len,'d')
else:
w = getattr(numpy, window)(window_len)
+
y = numpy.convolve(w/w.sum(), s, mode='same')
return y[window_len-1:-window_len+1]
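
# A small usage sketch for smooth(), called the same way as elsewhere in this
# module (the noisy test signal below is made up for illustration):
#
#   t = numpy.linspace(0, 1, 256)
#   noisy = numpy.sin(2 * numpy.pi * 3 * t) + numpy.random.normal(0, 0.1, 256)
#   smoothed = smooth(noisy, window_len=16)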
FFT_SIZE = 0x400
@interfacedoc
- def __init__(self, width=1024, height=256, output=None, bg_color=(0,0,0), color_scheme='default'):
+ def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
self.width = width
self.height = height
self.bg_color = bg_color
self.color_scheme = color_scheme
- self.filename = output
self.graph = None
@staticmethod
if self.graph:
self.graph = None
self.graph = SpectrogramImage(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
- bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename)
+ bg_color=self.bg_color, color_scheme=self.color_scheme)
@interfacedoc
def process(self, frames, eod=False):
return frames, eod
@interfacedoc
- def render(self):
- if self.filename:
- self.graph.save()
+ def render(self, output=None):
+ if output:
+ self.graph.save(output)
return self.graph.image
-
+
FFT_SIZE = 0x400
@interfacedoc
- def __init__(self, width=1024, height=256, output=None, bg_color=(0,0,0), color_scheme='default'):
+ def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
self.width = width
self.height = height
self.bg_color = bg_color
self.color_scheme = color_scheme
- self.filename = output
self.graph = None
@staticmethod
if self.graph:
self.graph = None
self.graph = WaveformImage(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
- bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename)
+ bg_color=self.bg_color, color_scheme=self.color_scheme)
@interfacedoc
def process(self, frames, eod=False):
return frames, eod
@interfacedoc
- def render(self):
- if self.filename:
- self.graph.save()
+ def render(self, output=None):
+ if output:
+ self.graph.save(output)
return self.graph.image
from timeside.api import IGrapher
from timeside.grapher.core import *
+class WaveformImageJoyContour(WaveformImage):
+
+ def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, ndiv=1, symetry=None):
+ WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme)
+ self.contour = numpy.zeros(self.image_width)
+ self.centroids = numpy.zeros(self.image_width)
+ self.ndiv = ndiv
+ self.x = numpy.r_[0:self.image_width-1:1]
+ self.dx1 = self.x[1]-self.x[0]
+ self.symetry = symetry
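+ # ndiv is the number of stacked contour curves drawn; when symetry is set,
+ # the curves are mirrored around the horizontal middle of the image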
+
+ def get_peaks_contour(self, x, peaks, spectral_centroid=None):
+ self.contour[x] = numpy.max(peaks)
+ self.centroids[x] = spectral_centroid
+
+ def mean(self, samples):
+ return numpy.mean(samples)
+
+ def normalize(self, contour):
+ contour = contour-min(contour)
+ return contour/max(contour)
+
+ def draw_peaks_contour(self):
+ contour = self.contour.copy()
+
+ # Smoothing
+ contour = smooth(contour, window_len=16)
+
+ # Normalize
+ contour = self.normalize(contour)
+
+ # Scaling
+ #ratio = numpy.mean(contour)/numpy.sqrt(2)
+ ratio = 1
+ contour = self.normalize(numpy.expm1(contour/ratio))*(1-10**-6)
+
+ # Spline
+ #contour = cspline1d(contour)
+ #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
+
+ if self.symetry:
+ height = int(self.image_height/2)
+ else:
+ height = self.image_height
+
+ # Multicurve rotating
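+ # each successive copy of the contour is drawn with a dimmer grey level and
+ # rescaled by a decreasing factor, producing the layered effect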
+ for i in range(0,self.ndiv):
+ self.previous_x, self.previous_y = None, None
+
+ #bright_color = 255
+ bright_color = int(255*(1-float(i)/(self.ndiv*2)))
+# bright_color = 255-bright_color+150
+# line_color = self.color_lookup[int(self.centroids[j]*255.0)]
+ line_color = (bright_color,bright_color,bright_color)
+
+ # Linear
+ #contour = contour*(1.0-float(i)/self.ndiv)
+ #contour = contour*(1-float(i)/self.ndiv)
+
+ # Cosine
+ contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
+ #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
+
+ # Negative Sine
+ #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
+
+ curve = (height-1)*contour
+# curve = contour*(height-2)/2+height/2
+
+ for x in self.x:
+ x = int(x)
+ y = curve[x]
+ if not x == 0:
+ if not self.symetry:
+ self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
+ self.draw_anti_aliased_pixels(x, y, y, line_color)
+ else:
+ self.draw.line([self.previous_x, self.previous_y+height, x, y+height], line_color)
+ self.draw_anti_aliased_pixels(x, y+height, y+height, line_color)
+ self.draw.line([self.previous_x, -self.previous_y+height, x, -y+height], line_color)
+ self.draw_anti_aliased_pixels(x, -y+height, -y+height, line_color)
+ else:
+ if not self.symetry:
+ self.draw.point((x, y), line_color)
+ else:
+ self.draw.point((x, y+height), line_color)
+ self.previous_x, self.previous_y = x, y
+
+ def process(self, frames, eod):
+ if len(frames) != 1:
+ buffer = frames[:,0].copy()
+ buffer.shape = (len(buffer),1)
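+ # the adapter re-blocks the incoming frames into fixed-size buffers, one per
+ # image column, independently of the decoder's buffer size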
+ for samples, end in self.pixels_adapter.process(buffer, eod):
+ if self.pixel_cursor < self.image_width:
+ #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True)
+ peaks = self.peaks(samples)
+ self.get_peaks_contour(self.pixel_cursor, peaks)
+ self.pixel_cursor += 1
+ if eod:
+ self.draw_peaks_contour()
+
+ def save(self, filename):
+ """ Apply last 2D transforms and write all pixels to the file. """
+ # offset added to the middle line pixels (set to 0 for no middle line)
+ a = 1
+ for x in range(self.image_width):
+ self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
+ self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
+ self.image.save(filename)
+
class WaveformJoyDiv(Processor):
implements(IGrapher)
FFT_SIZE = 0x400
@interfacedoc
- def __init__(self, width=1024, height=256, output=None, bg_color=(136,136,136), color_scheme='default'):
+ def __init__(self, width=1024, height=256, bg_color=(136,136,136), color_scheme='default'):
self.width = width
self.height = height
self.bg_color = bg_color
self.color_scheme = color_scheme
- self.filename = output
self.graph = None
+ self.ndiv = 4
+ self.symetry = False
@staticmethod
@interfacedoc
if self.graph:
self.graph = None
self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
- bg_color=self.bg_color, color_scheme=self.color_scheme, filename=self.filename)
+ bg_color=self.bg_color, color_scheme=self.color_scheme, ndiv=self.ndiv, symetry=self.symetry)
@interfacedoc
def process(self, frames, eod=False):
return frames, eod
@interfacedoc
- def render(self):
- if self.filename:
- self.graph.save()
+ def render(self, output=None):
+ if output:
+ self.graph.save(output)
return self.graph.image
self.width = 655
# Height of the image
- self.height = 95
+ self.height = 96
# Background color
self.bg_color = (136,136,136)
self.media_list = self.get_media_list()
if not os.path.exists(self.img_dir):
- os.mkdir(self.img_dir)
+ os.makedirs(self.img_dir)
self.path_dict = self.get_path_dict()
def get_media_list(self):
audio = os.path.join(os.path.dirname(__file__), source)
decoder = timeside.decoder.FileDecoder(audio)
analyzer = timeside.analyzer.Duration()
- waveform = timeside.grapher.WaveformJoyDiv(width=self.width, height=self.height, output=image,
+ waveform = timeside.grapher.WaveformJoyDiv(width=self.width, height=self.height,
bg_color=self.bg_color, color_scheme=self.color_scheme)
(decoder | analyzer | waveform).run()
duration = analyzer.result()
print 'Rendering ', source, ' to ', image, '...'
print 'frames per pixel = ', waveform.graph.samples_per_pixel
- waveform.render()
+ waveform.render(output=image)
if __name__ == '__main__':