# WAVEFORMS
def testWav2Waveform(self):
"Test WAV to Waveform"
- from timeside.grapher.waveform import Waveform
+ from timeside.grapher.waveform_simple import Waveform
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.wav")
self.image = "/tmp/test_waveform_sweep_wav.png"
self.grapher = Waveform(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
def testFlac2Waveform(self):
"Test FLAC to Waveform"
- from timeside.grapher.waveform import Waveform
+ from timeside.grapher.waveform_simple import Waveform
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.flac")
self.image = "/tmp/test_waveform_sweep_flac.png"
self.grapher = Waveform(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
def testMp32Waveform(self):
"Test MP3 to Waveform"
- from timeside.grapher.waveform import Waveform
+ from timeside.grapher.waveform_simple import Waveform
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.mp3")
self.image = "/tmp/test_waveform_sweep_mp3.png"
self.grapher = Waveform(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
def testOgg2Waveform(self):
"Test OGG to Waveform"
- from timeside.grapher.waveform import Waveform
+ from timeside.grapher.waveform_simple import Waveform
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.ogg")
self.image = "/tmp/test_waveform_sweep_ogg.png"
self.grapher = Waveform(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
- # WAVEFORM SIMPLE
- def testWav2WaveformSimple(self):
- "Test WAV to WaveformSimple"
- from timeside.grapher.waveform_simple import WaveformSimple
+ # WAVEFORM CENTROID
+ def testWav2WaveformCentroid(self):
+ "Test WAV to WaveformCentroid"
+ from timeside.grapher.waveform_centroid import WaveformCentroid
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.wav")
- self.image = "/tmp/test_waveform_simple_sweep_wav.png"
- self.grapher = WaveformSimple(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+ self.image = "/tmp/test_waveform_centroid_sweep_wav.png"
+ self.grapher = WaveformCentroid(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+
+ def testFlac2WaveformCentroid(self):
+ "Test FLAC to WaveformCentroid"
+ from timeside.grapher.waveform_centroid import WaveformCentroid
+ self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.flac")
+ self.image = "/tmp/test_waveform_centroid_sweep_flac.png"
+ self.grapher = WaveformCentroid(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+
+ def testMp32WaveformCentroid(self):
+ "Test MP3 to WaveformCentroid"
+ from timeside.grapher.waveform_centroid import WaveformCentroid
+ self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.mp3")
+ self.image = "/tmp/test_waveform_centroid_sweep_mp3.png"
+ self.grapher = WaveformCentroid(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+
+ def testOgg2WaveformCentroid(self):
+ "Test OGG to WaveformCentroid"
+ from timeside.grapher.waveform_centroid import WaveformCentroid
+ self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.ogg")
+ self.image = "/tmp/test_waveform_centroid_sweep_ogg.png"
+ self.grapher = WaveformCentroid(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+
+ # WAVEFORMS TRANSPARENT
+ def testWav2WaveformTransparent(self):
+ "Test WAV to WaveformTransparent"
+ from timeside.grapher.waveform_transparent import WaveformTransparent
+ self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.wav")
+ self.image = "/tmp/test_waveform_transparent_sweep_wav.png"
+ self.grapher = WaveformTransparent(width=1024, height=256, bg_color=None, color_scheme='default')
+
+ def testFlac2WaveformTransparent(self):
+ "Test FLAC to WaveformTransparent"
+ from timeside.grapher.waveform_transparent import WaveformTransparent
+ self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.flac")
+ self.image = "/tmp/test_waveform_transparent_sweep_flac.png"
+ self.grapher = WaveformTransparent(width=1024, height=256, bg_color=None, color_scheme='default')
+
+ def testMp32WaveformTransparent(self):
+ "Test MP3 to WaveformTransparent"
+ from timeside.grapher.waveform_transparent import WaveformTransparent
+ self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.mp3")
+ self.image = "/tmp/test_waveform_transparent_sweep_mp3.png"
+ self.grapher = WaveformTransparent(width=1024, height=256, bg_color=None, color_scheme='default')
+
+ def testOgg2WaveformTransparent(self):
+ "Test OGG to WaveformTransparent"
+ from timeside.grapher.waveform_transparent import WaveformTransparent
+ self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.ogg")
+ self.image = "/tmp/test_waveform_transparent_sweep_ogg.png"
+ self.grapher = WaveformTransparent(width=1024, height=256, bg_color=None, color_scheme='default')
# WAVEFORMS CONTOUR BLACK
def testWav2WaveformContourBlack(self):
"Test WAV to WaveformContourBlack"
- from timeside.grapher.waveform_contour_bk import WaveformContourBlack
+ from timeside.grapher.waveform_contour_black import WaveformContourBlack
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.wav")
self.image = "/tmp/test_waveform_contour_bk_sweep_wav.png"
self.grapher = WaveformContourBlack(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
def testFlac2WaveformContourBlack(self):
"Test FLAC to WaveformContourBlack"
- from timeside.grapher.waveform_contour_bk import WaveformContourBlack
+ from timeside.grapher.waveform_contour_black import WaveformContourBlack
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.flac")
self.image = "/tmp/test_waveform_contour_bk_sweep_flac.png"
self.grapher = WaveformContourBlack(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
def testMp32WaveformContourBlack(self):
"Test MP3 to WaveformContourBlack"
- from timeside.grapher.waveform_contour_bk import WaveformContourBlack
+ from timeside.grapher.waveform_contour_black import WaveformContourBlack
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.mp3")
self.image = "/tmp/test_waveform_contour_bk_sweep_mp3.png"
self.grapher = WaveformContourBlack(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
def testOgg2WaveformContourBlack(self):
"Test OGG to WaveformContourBlack"
- from timeside.grapher.waveform_contour_bk import WaveformContourBlack
+ from timeside.grapher.waveform_contour_black import WaveformContourBlack
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.ogg")
self.image = "/tmp/test_waveform_contour_bk_sweep_ogg.png"
self.grapher = WaveformContourBlack(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
# WAVEFORMS CONTOUR WHITE
def testWav2WaveformContourWhite(self):
"Test WAV to WaveformContourWhite"
- from timeside.grapher.waveform_contour_wh import WaveformContourWhite
+ from timeside.grapher.waveform_contour_white import WaveformContourWhite
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.wav")
self.image = "/tmp/test_waveform_contour_wh_sweep_wav.png"
- self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+ self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(255,255,255), color_scheme='default')
def testFlac2WaveformContourWhite(self):
"Test FLAC to WaveformContourWhite"
- from timeside.grapher.waveform_contour_wh import WaveformContourWhite
+ from timeside.grapher.waveform_contour_white import WaveformContourWhite
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.flac")
self.image = "/tmp/test_waveform_contour_wh_sweep_flac.png"
- self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+ self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(255,255,255), color_scheme='default')
def testMp32WaveformContourWhite(self):
"Test MP3 to WaveformContourWhite"
- from timeside.grapher.waveform_contour_wh import WaveformContourWhite
+ from timeside.grapher.waveform_contour_white import WaveformContourWhite
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.mp3")
self.image = "/tmp/test_waveform_contour_wh_sweep_mp3.png"
- self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
+ self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(255,255,255), color_scheme='default')
def testOggWaveformContourWhite(self):
"Test OGG to WaveformContourWhite"
- from timeside.grapher.waveform_contour_wh import WaveformContourWhite
+ from timeside.grapher.waveform_contour_white import WaveformContourWhite
self.source = os.path.join (os.path.dirname(__file__), "samples/sweep.ogg")
self.image = "/tmp/test_waveform_contour_wh_sweep_ogg.png"
- self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(0,0,0), color_scheme='default')
-
+ self.grapher = WaveformContourWhite(width=1024, height=256, bg_color=(255,255,255), color_scheme='default')
# SPECTROGRAMS
def testWav2Spectrogram(self):
# -*- coding: utf-8 -*-
-from core import *
+from waveform_simple import *
from waveform_centroid import *
+from waveform_transparent import *
+from waveform_contour_black import *
+from waveform_contour_white import *
from spectrogram import *
-from waveform_contour_bk import *
-from waveform_contour_wh import *
-from waveform_simple import *
class Spectrum(object):
""" FFT based frequency analysis of audio frames."""
- def __init__(self, fft_size, totalframes, samplerate, lower, higher, window_function=numpy.hanning):
+ def __init__(self, fft_size, samplerate, blocksize, totalframes, lower, higher, window_function=numpy.hanning):
self.fft_size = fft_size
self.window = window_function(self.fft_size)
self.window_function = window_function
self.spectrum_range = None
self.lower = lower
self.higher = higher
+ self.blocksize = blocksize
self.lower_log = math.log10(self.lower)
self.higher_log = math.log10(self.higher)
self.clip = lambda val, low, high: min(high, max(low, val))
self.totalframes = totalframes
self.samplerate = samplerate
+ self.window_function = window_function
+ self.window = self.window_function(self.blocksize)
def process(self, frames, eod, spec_range=120.0):
""" Returns a tuple containing the spectral centroid and the spectrum (dB scales) of the input audio frames.
FFT window sizes are adatable to the input frame size."""
samples = frames[:,0]
- nsamples = len(samples)
- window = self.window_function(nsamples)
- samples *= window
+ nsamples = len(frames[:,0])
+ if nsamples != self.blocksize:
+ self.window = self.window_function(nsamples)
+ samples *= self.window
while nsamples > self.fft_size:
self.fft_size = 2 * self.fft_size
zeros_n = numpy.zeros(self.fft_size/2-int(nsamples/2)-1)
else:
zeros_n = numpy.zeros(self.fft_size/2-int(nsamples/2))
-
samples = numpy.concatenate((zeros_p, samples, zeros_n), axis=0)
fft = numpy.fft.fft(samples)
- spectrum = numpy.abs(fft[:fft.shape[0] / 2 + 1]) / float(nsamples) # normalized abs(FFT) between 0 and 1
+ # normalized abs(FFT) between 0 and 1
+ spectrum = numpy.abs(fft[:fft.shape[0] / 2 + 1]) / float(nsamples)
length = numpy.float64(spectrum.shape[0])
# scale the db spectrum from [- spec_range db ... 0 db] > [0..1]
# calculate the spectral centroid
if self.spectrum_range == None:
self.spectrum_range = numpy.arange(length)
-
spectral_centroid = (spectrum * self.spectrum_range).sum() / (energy * (length - 1)) * self.samplerate * 0.5
# clip > log10 > scale between 0 and 1
spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - \
Generic abstract class for the graphers
'''
- fft_size = 0x400
+ fft_size = 0x800
frame_cursor = 0
pixel_cursor = 0
lower_freq = 200
higher_freq = 22050
- def __init__(self, width=1024, height=256, bg_color=(255,255,255), color_scheme='default'):
+ def __init__(self, width=1024, height=256, bg_color=None, color_scheme='default'):
self.bg_color = bg_color
self.color_scheme = color_scheme
self.graph = None
self.buffer_size = int(round(self.samples_per_pixel, 0))
self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
self.pixels_adapter_totalframes = self.pixels_adapter.blocksize(self.totalframes)
- self.spectrum = Spectrum(self.fft_size, self.totalframes, self.samplerate,
+ self.spectrum = Spectrum(self.fft_size, self.samplerate, self.blocksize, self.totalframes,
self.lower_freq, self.higher_freq, numpy.hanning)
self.pixel = self.image.load()
self.draw = ImageDraw.Draw(self.image)
def watermark(self, text, font=None, color=(255, 255, 255), opacity=.6, margin=(5,5)):
self.image = im_watermark(text, text, color=color, opacity=opacity, margin=margin)
- def draw_centroid_peaks(self, x, peaks, spectral_centroid):
- """ draw 2 peaks at x using the spectral_centroid for color """
+ def draw_peaks(self, x, peaks, line_color):
+ """Draw 2 peaks at x"""
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
- line_color = self.color_lookup[int(spectral_centroid*255.0)]
if self.previous_y:
self.draw.line([self.previous_x, self.previous_y, x, y1, x, y2], line_color)
else:
self.draw.line([x, y1, x, y2], line_color)
- self.previous_x, self.previous_y = x, y2
self.draw_anti_aliased_pixels(x, y1, y2, line_color)
+ self.previous_x, self.previous_y = x, y2
- def draw_simple_peaks(self, x, peaks, line_color):
- """ draw 2 peaks at x using the spectral_centroid for color """
+ def draw_peaks_inverted(self, x, peaks, line_color):
+ """Draw 2 inverted peaks at x"""
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
self.draw.line((x, self.image_height , x, y1), line_color)
else:
self.draw.line((x, 0, x, self.image_height), line_color)
+ self.draw_anti_aliased_pixels(x, y1, y2, line_color)
self.previous_x, self.previous_y = x, y1
def draw_anti_aliased_pixels(self, x, y1, y2, color):
#contour = contour*(1.0-float(i)/self.ndiv)
#contour = contour*(1-float(i)/self.ndiv)
- # Scaled
+ # Cosinus
contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
#contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
#contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
-from timeside.core import Processor, implements, interfacedoc, FixedSizeInputAdapter
+from timeside.core import implements, interfacedoc
from timeside.api import IGrapher
from timeside.grapher.core import *
y_max = math.log10(f_max)
for y in range(self.image_height):
freq = math.pow(10.0, y_min + y / (self.image_height - 1.0) *(y_max - y_min))
- bin = freq / 22050.0 * (self.fft_size/2 + 1)
- if bin < self.fft_size/2:
- alpha = bin - int(bin)
- self.y_to_bin.append((int(bin), alpha * 255))
+ fft_bin = freq / 22050.0 * (self.fft_size/2 + 1)
+ if fft_bin < self.fft_size/2:
+ alpha = fft_bin - int(fft_bin)
+ self.y_to_bin.append((int(fft_bin), alpha * 255))
@staticmethod
@interfacedoc
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
super(Spectrogram, self).setup(channels, samplerate, blocksize, totalframes)
- self.spectrum = Spectrum(self.fft_size, self.totalframes, self.samplerate,
- self.lower_freq, self.higher_freq, numpy.hanning)
self.image = Image.new("P", (self.image_height, self.image_width))
self.image.putpalette(interpolate_colors(self.colors, True))
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
- buffer = frames[:,0].copy()
- buffer.shape = (len(buffer),1)
- for samples, end in self.pixels_adapter.process(buffer, eod):
+ chunk = frames[:,0].copy()
+ chunk.shape = (len(chunk),1)
+ for samples, end in self.pixels_adapter.process(chunk, eod):
if self.pixel_cursor < self.image_width:
(spectral_centroid, db_spectrum) = self.spectrum.process(samples, True)
self.draw_spectrum(self.pixel_cursor, db_spectrum)
@interfacedoc
def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
super(WaveformCentroid, self).__init__(width, height, bg_color, color_scheme)
+ colors = default_color_schemes[color_scheme]['waveform']
+ self.color_lookup = interpolate_colors(colors)
@staticmethod
@interfacedoc
for samples, end in self.pixels_adapter.process(buffer, eod):
if self.pixel_cursor < self.image_width:
(spectral_centroid, db_spectrum) = self.spectrum.process(samples, True)
- self.draw_centroid_peaks(self.pixel_cursor, peaks(samples), spectral_centroid)
+ line_color = self.color_lookup[int(spectral_centroid*255.0)]
+ self.draw_peaks(self.pixel_cursor, peaks(samples), line_color)
self.pixel_cursor += 1
return frames, eod
def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
super(WaveformContourBlack, self).__init__(width, height, bg_color, color_scheme)
self.contour = numpy.zeros(self.image_width)
- self.centroids = numpy.zeros(self.image_width)
self.ndiv = 4
self.x = numpy.r_[0:self.image_width-1:1]
- self.dx1 = self.x[1]-self.x[0]
self.symetry = True
self.color_offset = 160
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
super(WaveformContourBlack, self).setup(channels, samplerate, blocksize, totalframes)
- def get_peaks_contour(self, x, peaks, spectral_centroid=None):
- self.contour[x] = numpy.max(peaks)
- self.centroids[x] = spectral_centroid
-
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
buffer.shape = (len(buffer),1)
for samples, end in self.pixels_adapter.process(buffer, eod):
if self.pixel_cursor < self.image_width:
- self.get_peaks_contour(self.pixel_cursor, peaks(samples))
+ self.contour[self.pixel_cursor] = numpy.max(peaks(samples))
self.pixel_cursor += 1
if eod:
self.draw_peaks_contour()
from timeside.core import Processor, implements, interfacedoc, FixedSizeInputAdapter
from timeside.api import IGrapher
from timeside.grapher.core import *
-from timeside.grapher.waveform_contour_bk import WaveformContourBlack
+from timeside.grapher.waveform_contour_black import WaveformContourBlack
class WaveformContourWhite(WaveformContourBlack):
def __init__(self, width=1024, height=256, bg_color=(255,255,255), color_scheme='default'):
super(Waveform, self).__init__(width, height, bg_color, color_scheme)
self.line_color = (0,0,0)
- colors = default_color_schemes[color_scheme]['waveform']
- self.color_lookup = interpolate_colors(colors)
@staticmethod
@interfacedoc
buffer.shape = (len(buffer),1)
for samples, end in self.pixels_adapter.process(buffer, eod):
if self.pixel_cursor < self.image_width-1:
- self.draw_simple_peaks(self.pixel_cursor, peaks(samples), self.line_color)
+ self.draw_peaks(self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
if self.pixel_cursor == self.image_width-1:
- self.draw_simple_peaks(self.pixel_cursor, peaks(samples), self.line_color)
+ self.draw_peaks(self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
return frames, eod
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2007-2010 Guillaume Pellerin <yomguy@parisson.com>
+# Copyright (c) 2010 Olivier Guilyardi <olivier@samalyse.com>
+
+# This file is part of TimeSide.
+
+# TimeSide is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+
+# TimeSide is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
+
+
+from timeside.core import Processor, implements, interfacedoc, FixedSizeInputAdapter
+from timeside.api import IGrapher
+from timeside.grapher.core import *
+from timeside.grapher.waveform_simple import Waveform
+
+class WaveformTransparent(Waveform):
+ """ Builds a PIL image representing a waveform of the audio stream.
+ Adds pixels iteratively thanks to the adapter providing fixed size frame buffers.
+ """
+
+ implements(IGrapher)
+
+ @interfacedoc
+ def __init__(self, width=1024, height=256, bg_color=None, color_scheme='default'):
+ super(WaveformTransparent, self).__init__(width, height, bg_color, color_scheme)
+ self.line_color = (255,255,255)
+
+ @staticmethod
+ @interfacedoc
+ def id():
+ return "waveform_transparent"
+
+ @staticmethod
+ @interfacedoc
+ def name():
+ return "Waveform transparent"
+
+ @interfacedoc
+ def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+ super(WaveformTransparent, self).setup(channels, samplerate, blocksize, totalframes)
+
+ @interfacedoc
+ def process(self, frames, eod=False):
+ if len(frames) != 1:
+ buffer = frames[:,0]
+ buffer.shape = (len(buffer),1)
+ for samples, end in self.pixels_adapter.process(buffer, eod):
+ if self.pixel_cursor < self.image_width-1:
+ self.draw_peaks_inverted(self.pixel_cursor, peaks(samples), self.line_color)
+ self.pixel_cursor += 1
+ if self.pixel_cursor == self.image_width-1:
+ self.draw_peaks(self.pixel_cursor, peaks(samples), self.bg_color)
+ self.pixel_cursor += 1
+ return frames, eod
+
+ @interfacedoc
+ def render(self, output):
+ if output:
+ a = 1
+ for x in range(self.image_width):
+ self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
+ self.image.save(output)
+ return self.image