from waveform_contour import *
from spectrogram_log import *
from spectrogram_lin import *
-from render_analyzers import *
\ No newline at end of file
+from render_analyzers import *
default_color_schemes = {
'default': {
- 'waveform': [(50,0,200), (0,220,80), (255,224,0), (255,0,0)],
- 'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
- (224,224,44), (255,60,30), (255,255,255)]
+ 'waveform': [(50, 0, 200), (0, 220, 80), (255, 224, 0), (255, 0, 0)],
+ 'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+ (224, 224, 44), (255, 60, 30), (255, 255, 255)]
},
'iso': {
- 'waveform': [(0,0,255), (0,255,255), (255,255,0), (255,0,0)],
- 'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
- (224,224,44), (255,60,30), (255,255,255)]
+ 'waveform': [(0, 0, 255), (0, 255, 255), (255, 255, 0), (255, 0, 0)],
+ 'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+ (224, 224, 44), (255, 60, 30), (255, 255, 255)]
},
'purple': {
- 'waveform': [(173,173,173), (147,149,196), (77,80,138), (108,66,0)],
- 'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
- (224,224,44), (255,60,30), (255,255,255)]
+ 'waveform': [(173, 173, 173), (147, 149, 196), (77, 80, 138), (108, 66, 0)],
+ 'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+ (224, 224, 44), (255, 60, 30), (255, 255, 255)]
},
'awdio': {
- 'waveform': [(255,255,255), (255,255,255), (255,255,255), (255,255,255)],
- 'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
- (224,224,44), (255,60,30), (255,255,255)]
+ 'waveform': [(255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255)],
+ 'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+ (224, 224, 44), (255, 60, 30), (255, 255, 255)]
},
}
# Guillaume Pellerin <yomguy@parisson.com>
-import optparse, math, sys, numpy
+import optparse
+import math
+import sys
+import numpy
try:
from PIL import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
except ImportError:
- import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
+ import ImageFilter
+ import ImageChops
+ import Image
+ import ImageDraw
+ import ImageColor
+ import ImageEnhance
from timeside.core import *
from timeside.api import IGrapher
class Spectrum(object):
+
""" FFT based frequency analysis of audio frames."""
def __init__(self, fft_size, samplerate, blocksize, totalframes, lower, higher, window_function=None):
self.window_function = numpy.hanning
self.window = self.window_function(self.blocksize)
-
def process(self, frames, eod, spec_range=120.0):
""" Returns a tuple containing the spectral centroid and the spectrum (dB scales) of the input audio frames.
FFT window sizes are adatable to the input frame size."""
- samples = frames[:,0]
- nsamples = len(frames[:,0])
+ samples = frames[:, 0]
+ nsamples = len(frames[:, 0])
if nsamples != self.blocksize:
self.window = self.window_function(nsamples)
samples *= self.window
while nsamples > self.fft_size:
self.fft_size = 2 * self.fft_size
- zeros_p = numpy.zeros(self.fft_size/2-int(nsamples/2))
+ zeros_p = numpy.zeros(self.fft_size / 2 - int(nsamples / 2))
if nsamples % 2:
- zeros_n = numpy.zeros(self.fft_size/2-int(nsamples/2)-1)
+ zeros_n = numpy.zeros(self.fft_size / 2 - int(nsamples / 2) - 1)
else:
- zeros_n = numpy.zeros(self.fft_size/2-int(nsamples/2))
+ zeros_n = numpy.zeros(self.fft_size / 2 - int(nsamples / 2))
samples = numpy.concatenate((zeros_p, samples, zeros_n), axis=0)
fft = numpy.fft.fft(samples)
length = numpy.float64(spectrum.shape[0])
        # scale the dB spectrum from [-spec_range dB .. 0 dB] to [0..1]
- db_spectrum = ((20*(numpy.log10(spectrum + 1e-30))).clip(-spec_range, 0.0) + spec_range)/spec_range
+ db_spectrum = ((20 * (numpy.log10(spectrum + 1e-30)))
+ .clip(-spec_range, 0.0) + spec_range) / spec_range
energy = spectrum.sum()
spectral_centroid = 0
# calculate the spectral centroid
if self.spectrum_range == None:
self.spectrum_range = numpy.arange(length)
- spectral_centroid = (spectrum * self.spectrum_range).sum() / (energy * (length - 1)) * self.samplerate * 0.5
+ spectral_centroid = (spectrum * self.spectrum_range).sum() / \
+ (energy * (length - 1)) * \
+ self.samplerate * 0.5
# clip > log10 > scale between 0 and 1
- spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - \
- self.lower_log) / (self.higher_log - self.lower_log)
+ spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) -
+ self.lower_log) / (self.higher_log - self.lower_log)
return (spectral_centroid, db_spectrum)
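# Editor's note: a minimal standalone sketch of the two values returned
# above, assuming a 2048-sample mono frame at 44100 Hz and the default
# spec_range of 120 dB; the spectrum normalization mirrors the (elided)
# line that feeds `length`.
import numpy
samplerate, spec_range, n = 44100.0, 120.0, 2048
samples = numpy.hanning(n) * numpy.sin(2 * numpy.pi * 1000.0 * numpy.arange(n) / samplerate)
fft = numpy.fft.fft(samples)
spectrum = numpy.abs(fft[:fft.shape[0] // 2 + 1]) / float(n)   # positive half, normalized
db_spectrum = ((20 * numpy.log10(spectrum + 1e-30)).clip(-spec_range, 0.0)
               + spec_range) / spec_range                      # [-120 dB, 0 dB] -> [0, 1]
bins = numpy.arange(float(spectrum.shape[0]))
# energy-weighted mean bin, rescaled to Hz over [0, samplerate / 2]
centroid_hz = (spectrum * bins).sum() / (spectrum.sum() * (len(bins) - 1)) * samplerate * 0.5
# centroid_hz lands close to the 1 kHz test tone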
class Grapher(Processor):
+
'''
Generic abstract class for the graphers
'''
        self.color_scheme = color_scheme
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
- super(Grapher, self).setup(channels, samplerate, blocksize, totalframes)
+ super(Grapher, self).setup(
+ channels, samplerate, blocksize, totalframes)
self.sample_rate = samplerate
- self.higher_freq = self.sample_rate/2
+ self.higher_freq = self.sample_rate / 2
self.block_size = blocksize
self.total_frames = totalframes
- self.image = Image.new("RGBA", (self.image_width, self.image_height), self.bg_color)
+ self.image = Image.new(
+ "RGBA", (self.image_width, self.image_height), self.bg_color)
self.samples_per_pixel = self.total_frames / float(self.image_width)
self.buffer_size = int(round(self.samples_per_pixel, 0))
- self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
- self.pixels_adapter_totalframes = self.pixels_adapter.blocksize(self.total_frames)
- self.spectrum = Spectrum(self.fft_size, self.sample_rate, self.block_size, self.total_frames,
- self.lower_freq, self.higher_freq, numpy.hanning)
+ self.pixels_adapter = FixedSizeInputAdapter(
+ self.buffer_size, 1, pad=False)
+ self.pixels_adapter_totalframes = self.pixels_adapter.blocksize(
+ self.total_frames)
+ self.spectrum = Spectrum(
+ self.fft_size, self.sample_rate, self.block_size, self.total_frames,
+ self.lower_freq, self.higher_freq, numpy.hanning)
self.pixel = self.image.load()
self.draw = ImageDraw.Draw(self.image)
return
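# Editor's note: a sanity check of the frames-to-pixels mapping configured
# above, with assumed figures: a 60 s mono stream at 44100 Hz rendered
# into a 1024-pixel-wide image.
total_frames, image_width = 60 * 44100, 1024
samples_per_pixel = total_frames / float(image_width)   # ~2583.98 frames per column
buffer_size = int(round(samples_per_pixel, 0))          # adapter yields 2584-frame buffers
# each buffer drawn by process() advances pixel_cursor by one column, so
# the stream spans the full image width (modulo rounding on the last column).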
return self.image
- def watermark(self, text, font=None, color=(255, 255, 255), opacity=.6, margin=(5,5)):
- self.image = im_watermark(self.image, text, color=color, opacity=opacity, margin=margin)
+ def watermark(self, text, font=None, color=(255, 255, 255), opacity=.6, margin=(5, 5)):
+ self.image = im_watermark(
+ self.image, text, color=color, opacity=opacity, margin=margin)
def draw_peaks(self, x, peaks, line_color):
"""Draw 2 peaks at x"""
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
if self.previous_y:
- self.draw.line([self.previous_x, self.previous_y, x, y1, x, y2], line_color)
+ self.draw.line(
+ [self.previous_x, self.previous_y, x, y1, x, y2], line_color)
else:
self.draw.line([x, y1, x, y2], line_color)
y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
- if self.previous_y and x < self.image_width-1:
+ if self.previous_y and x < self.image_width - 1:
if y1 < y2:
self.draw.line((x, 0, x, y1), line_color)
- self.draw.line((x, self.image_height , x, y2), line_color)
+ self.draw.line((x, self.image_height, x, y2), line_color)
else:
self.draw.line((x, 0, x, y2), line_color)
- self.draw.line((x, self.image_height , x, y1), line_color)
+ self.draw.line((x, self.image_height, x, y1), line_color)
else:
self.draw.line((x, 0, x, self.image_height), line_color)
self.draw_anti_aliased_pixels(x, y1, y2, line_color)
if alpha > 0.0 and alpha < 1.0 and y_max_int + 1 < self.image_height:
current_pix = self.pixel[int(x), y_max_int + 1]
- r = int((1-alpha)*current_pix[0] + alpha*color[0])
- g = int((1-alpha)*current_pix[1] + alpha*color[1])
- b = int((1-alpha)*current_pix[2] + alpha*color[2])
- self.pixel[x, y_max_int + 1] = (r,g,b)
+ r = int((1 - alpha) * current_pix[0] + alpha * color[0])
+ g = int((1 - alpha) * current_pix[1] + alpha * color[1])
+ b = int((1 - alpha) * current_pix[2] + alpha * color[2])
+ self.pixel[x, y_max_int + 1] = (r, g, b)
y_min = min(y1, y2)
y_min_int = int(y_min)
if alpha > 0.0 and alpha < 1.0 and y_min_int - 1 >= 0:
current_pix = self.pixel[x, y_min_int - 1]
- r = int((1-alpha)*current_pix[0] + alpha*color[0])
- g = int((1-alpha)*current_pix[1] + alpha*color[1])
- b = int((1-alpha)*current_pix[2] + alpha*color[2])
- self.pixel[x, y_min_int - 1] = (r,g,b)
+ r = int((1 - alpha) * current_pix[0] + alpha * color[0])
+ g = int((1 - alpha) * current_pix[1] + alpha * color[1])
+ b = int((1 - alpha) * current_pix[2] + alpha * color[2])
+ self.pixel[x, y_min_int - 1] = (r, g, b)
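# Editor's note: the blending above is plain per-channel alpha compositing;
# a standalone helper with made-up values, for illustration only.
def _blend(background, color, alpha):
    """Mix two (r, g, b) tuples: alpha=0 keeps the background, alpha=1 the color."""
    return tuple(int((1 - alpha) * b + alpha * c) for b, c in zip(background, color))
# _blend((0, 0, 0), (255, 60, 30), 0.25) == (63, 15, 7)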
def draw_peaks_contour(self):
contour = self.contour.copy()
# Scaling
#ratio = numpy.mean(contour)/numpy.sqrt(2)
ratio = 1
- contour = normalize(numpy.expm1(contour/ratio))*(1-10**-6)
+ contour = normalize(numpy.expm1(contour / ratio)) * (1 - 10 ** -6)
# Spline
#contour = cspline1d(contour)
#contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
if self.symetry:
- height = int(self.image_height/2)
+ height = int(self.image_height / 2)
else:
height = self.image_height
# Multicurve rotating
- for i in range(0,self.ndiv):
+ for i in range(0, self.ndiv):
self.previous_x, self.previous_y = None, None
- bright_color = int(255*(1-float(i)/(self.ndiv*2)))
- bright_color = 255-bright_color+self.color_offset
+ bright_color = int(255 * (1 - float(i) / (self.ndiv * 2)))
+ bright_color = 255 - bright_color + self.color_offset
#line_color = self.color_lookup[int(self.centroids[j]*255.0)]
- line_color = (bright_color,bright_color,bright_color)
+ line_color = (bright_color, bright_color, bright_color)
# Linear
#contour = contour*(1.0-float(i)/self.ndiv)
#contour = contour*(1-float(i)/self.ndiv)
            # Cosine
- contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
+ contour = contour * \
+ numpy.arccos(float(i) / self.ndiv) * 2 / numpy.pi
#contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
#contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
- curve = (height-1)*contour
+ curve = (height - 1) * contour
#curve = contour*(height-2)/2+height/2
for x in self.x:
y = curve[x]
if not x == 0:
if not self.symetry:
- self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
+ self.draw.line(
+ [self.previous_x, self.previous_y, x, y], line_color)
self.draw_anti_aliased_pixels(x, y, y, line_color)
else:
- self.draw.line([self.previous_x, self.previous_y+height, x, y+height], line_color)
- self.draw_anti_aliased_pixels(x, y+height, y+height, line_color)
- self.draw.line([self.previous_x, -self.previous_y+height, x, -y+height], line_color)
- self.draw_anti_aliased_pixels(x, -y+height, -y+height, line_color)
+ self.draw.line(
+ [self.previous_x, self.previous_y + height, x, y + height], line_color)
+ self.draw_anti_aliased_pixels(
+ x, y + height, y + height, line_color)
+ self.draw.line(
+ [self.previous_x, -self.previous_y + height, x, -y + height], line_color)
+ self.draw_anti_aliased_pixels(
+ x, -y + height, -y + height, line_color)
else:
if not self.symetry:
self.draw.point((x, y), line_color)
else:
- self.draw.point((x, y+height), line_color)
+ self.draw.point((x, y + height), line_color)
self.previous_x, self.previous_y = x, y
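# Editor's note: the arccos factor above shapes the multicurve fade. For the
# ndiv=4 set by WaveformContourBlack below, the per-pass factors are:
import numpy
_fade = [numpy.arccos(i / 4.0) * 2 / numpy.pi for i in range(4)]
# _fade == [1.0, ~0.839, ~0.667, ~0.460]; pass 0 keeps full amplitude and each
# later, brighter curve is drawn smaller, and since `contour` is reassigned
# every iteration the attenuation compounds across passes.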
class DisplayAnalyzer(Grapher):
+
"""
Builds a PIL image from analyzer result
This is an Abstract base class
color_scheme)
self.parents.append(analyzer)
- self._result_id = result_id # TODO : make it generic when analyzer will be "atomize"
+        # TODO: make it generic once analyzers are "atomized"
+ self._result_id = result_id
@staticmethod
@interfacedoc
__doc__ = """Builds a PIL image representing """ + grapher_name
- NewGrapher.__name__ = 'Display'+result_id
+ NewGrapher.__name__ = 'Display' + result_id
return NewGrapher
grapher_name='Onset detection function')
wav = analyzer.Waveform()
DisplayWaveform = DisplayAnalyzer.create(analyzer=wav,
- result_id='waveform_analyzer',
- grapher_id='grapher_waveform',
- grapher_name='Waveform from Analyzer')
+ result_id='waveform_analyzer',
+ grapher_id='grapher_waveform',
+ grapher_name='Waveform from Analyzer')
irit4hz = analyzer.IRITSpeech4Hz()
Display4hzSpeechSegmentation = DisplayAnalyzer.create(analyzer=irit4hz,
result_id='irit_speech_4hz.segments',
class SpectrogramLinear(SpectrogramLog):
+
""" Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time).
    Adds pixels iteratively, using the adapter to provide fixed-size frame buffers."""
implements(IGrapher)
@interfacedoc
- def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
- super(SpectrogramLinear, self).__init__(width, height, bg_color, color_scheme)
+ def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+ super(SpectrogramLinear, self).__init__(
+ width, height, bg_color, color_scheme)
@staticmethod
@interfacedoc
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
- super(SpectrogramLinear, self).setup(channels, samplerate, blocksize, totalframes)
+ super(SpectrogramLinear, self).setup(
+ channels, samplerate, blocksize, totalframes)
def set_scale(self):
"""generate the lookup which translates y-coordinate to fft-bin"""
y_min = f_min
y_max = f_max
for y in range(self.image_height):
- freq = y_min + y / (self.image_height - 1.0) *(y_max - y_min)
- fft_bin = freq / f_max * (self.fft_size/2 + 1)
- if fft_bin < self.fft_size/2:
+ freq = y_min + y / (self.image_height - 1.0) * (y_max - y_min)
+ fft_bin = freq / f_max * (self.fft_size / 2 + 1)
+ if fft_bin < self.fft_size / 2:
alpha = fft_bin - int(fft_bin)
self.y_to_bin.append((int(fft_bin), alpha * 255))
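# Editor's note: a sketch of the lookup built above, under assumed values
# fft_size=2048, f_min=0.0, f_max=22050.0 and a 4-pixel-high image; alpha
# later interpolates between bin and bin + 1 in draw_spectrum().
fft_size, f_min, f_max, image_height = 2048, 0.0, 22050.0, 4
y_to_bin = []
for y in range(image_height):
    freq = f_min + y / (image_height - 1.0) * (f_max - f_min)
    fft_bin = freq / f_max * (fft_size / 2 + 1)
    if fft_bin < fft_size / 2:
        y_to_bin.append((int(fft_bin), (fft_bin - int(fft_bin)) * 255))
# y_to_bin == [(0, 0.0), (341, ~170.0), (683, ~85.0)]; the top row would hit
# the Nyquist bin and is skipped, exactly as in set_scale() above.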
class SpectrogramLog(Grapher):
+
""" Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time).
    Adds pixels iteratively, using the adapter to provide fixed-size frame buffers."""
implements(IGrapher)
@interfacedoc
- def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
- super(SpectrogramLog, self).__init__(width, height, bg_color, color_scheme)
+ def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+ super(SpectrogramLog, self).__init__(
+ width, height, bg_color, color_scheme)
self.lower_freq = 100
self.colors = default_color_schemes[color_scheme]['spectrogram']
self.pixels = []
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
- super(SpectrogramLog, self).setup(channels, samplerate, blocksize, totalframes)
+ super(SpectrogramLog, self).setup(
+ channels, samplerate, blocksize, totalframes)
self.image = self.image.convert("P")
self.image = self.image.transpose(Image.ROTATE_90)
self.image.putpalette(interpolate_colors(self.colors, True))
y_min = math.log10(f_min)
y_max = math.log10(f_max)
for y in range(self.image_height):
- freq = math.pow(10.0, y_min + y / (self.image_height - 1.0) *(y_max - y_min))
- fft_bin = freq / f_max * (self.fft_size/2 + 1)
- if fft_bin < self.fft_size/2:
+ freq = math.pow(
+ 10.0, y_min + y / (self.image_height - 1.0) * (y_max - y_min))
+ fft_bin = freq / f_max * (self.fft_size / 2 + 1)
+ if fft_bin < self.fft_size / 2:
alpha = fft_bin - int(fft_bin)
self.y_to_bin.append((int(fft_bin), alpha * 255))
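# Editor's note: the same lookup, but with rows spread evenly in log10(freq);
# a sketch assuming f_min=100.0 (self.lower_freq above), f_max=22050.0 and a
# 4-pixel-high image. Low frequencies get far more vertical resolution than
# in the linear variant.
import math
f_min, f_max, image_height = 100.0, 22050.0, 4
rows_hz = [math.pow(10.0, math.log10(f_min) + y / (image_height - 1.0)
                    * (math.log10(f_max) - math.log10(f_min)))
           for y in range(image_height)]
# rows_hz ~= [100.0, 604.1, 3650.0, 22050.0]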
def draw_spectrum(self, x, spectrum):
for (index, alpha) in self.y_to_bin:
- self.pixels.append( int( ((255.0-alpha) * spectrum[index] + alpha * spectrum[index + 1] )) )
+ self.pixels.append(
+ int(((255.0 - alpha) * spectrum[index] + alpha * spectrum[index + 1])))
for y in range(len(self.y_to_bin), self.image_height):
self.pixels.append(0)
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
- chunk = frames[:,0].copy()
- chunk.shape = (len(chunk),1)
+ chunk = frames[:, 0].copy()
+ chunk.shape = (len(chunk), 1)
for samples, end in self.pixels_adapter.process(chunk, eod):
if self.pixel_cursor < self.image_width:
- (spectral_centroid, db_spectrum) = self.spectrum.process(samples, True)
+ (spectral_centroid, db_spectrum) = self.spectrum.process(
+ samples, True)
self.draw_spectrum(self.pixel_cursor, db_spectrum)
self.pixel_cursor += 1
return frames, eod
""" Apply last 2D transforms"""
self.image.putdata(self.pixels)
self.image = self.image.transpose(Image.ROTATE_90)
-
try:
from PIL import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
except ImportError:
- import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
+ import ImageFilter
+ import ImageChops
+ import Image
+ import ImageDraw
+ import ImageColor
+ import ImageEnhance
import numpy
palette = []
for i in range(num_colors):
- index = (i * (len(colors) - 1))/(num_colors - 1.0)
+ index = (i * (len(colors) - 1)) / (num_colors - 1.0)
index_int = int(index)
alpha = index - float(index_int)
if alpha > 0:
- r = (1.0 - alpha) * colors[index_int][0] + alpha * colors[index_int + 1][0]
- g = (1.0 - alpha) * colors[index_int][1] + alpha * colors[index_int + 1][1]
- b = (1.0 - alpha) * colors[index_int][2] + alpha * colors[index_int + 1][2]
+            r = ((1.0 - alpha) * colors[index_int][0]
+                 + alpha * colors[index_int + 1][0])
+            g = ((1.0 - alpha) * colors[index_int][1]
+                 + alpha * colors[index_int + 1][1])
+            b = ((1.0 - alpha) * colors[index_int][2]
+                 + alpha * colors[index_int + 1][2])
else:
r = (1.0 - alpha) * colors[index_int][0]
g = (1.0 - alpha) * colors[index_int][1]
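# Editor's note: the loop above fills a palette by linear interpolation
# between successive color stops; a sketch with two stops and num_colors=3
# (a real "P"-mode palette holds 256 entries).
stops, num = [(0, 0, 0), (255, 60, 30)], 3
entries = []
for i in range(num):
    pos = (i * (len(stops) - 1)) / (num - 1.0)
    lo, frac = int(pos), pos - int(pos)
    if frac > 0:
        entries.append(tuple(int((1.0 - frac) * stops[lo][c] + frac * stops[lo + 1][c])
                             for c in range(3)))
    else:
        entries.append(stops[lo])
# entries == [(0, 0, 0), (127, 30, 15), (255, 60, 30)]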
if (len(vector) % factor):
print "Length of 'vector' is not divisible by 'factor'=%d!" % factor
return 0
- vector.shape = (len(vector)/factor, factor)
+ vector.shape = (len(vector) / factor, factor)
return numpy.mean(vector, axis=1)
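# Editor's note: the reshape-then-mean trick above, on a toy vector.
import numpy
_v = numpy.arange(6.0)           # [0, 1, 2, 3, 4, 5]
_v.shape = (len(_v) // 2, 2)     # rows of `factor` consecutive samples
# numpy.mean(_v, axis=1) -> array([0.5, 2.5, 4.5])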
>>> plt.show() # doctest: +SKIP
"""
- # TODO: the window parameter could be the window itself if an array instead of a string
+ # TODO: the window parameter could be the window itself if an array
+ # instead of a string
if x.ndim != 1:
raise ValueError, "smooth only accepts 1 dimension arrays."
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError, "Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
- s = numpy.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]
+ s = numpy.r_[2 * x[0] - x[window_len:1:-1],
+ x, 2 * x[-1] - x[-1:-window_len:-1]]
- if window == 'flat': #moving average
- w = numpy.ones(window_len,'d')
+ if window == 'flat': # moving average
+ w = numpy.ones(window_len, 'd')
else:
w = getattr(numpy, window)(window_len)
- y = numpy.convolve(w/w.sum(), s, mode='same')
- return y[window_len-1:-window_len+1]
+ y = numpy.convolve(w / w.sum(), s, mode='same')
+ return y[window_len - 1:-window_len + 1]
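# Editor's note: a usage sketch for smooth(), assuming its (elided) signature
# is smooth(x, window_len=10, window='hanning'). The mirrored end padding plus
# the final slice keep the output the same length as the input:
#     _sig = numpy.sin(numpy.linspace(0, 4 * numpy.pi, 200))
#     len(smooth(_sig, window_len=11, window='hanning')) == len(_sig)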
def reduce_opacity(im, opacity):
return im
-def im_watermark(im, inputtext, font=None, color=None, opacity=.6, margin=(30,30)):
+def im_watermark(im, inputtext, font=None, color=None, opacity=.6, margin=(30, 30)):
"""imprints a PIL image with the indicated text in lower-right corner"""
if im.mode != "RGBA":
im = im.convert("RGBA")
- textlayer = Image.new("RGBA", im.size, (0,0,0,0))
+ textlayer = Image.new("RGBA", im.size, (0, 0, 0, 0))
textdraw = ImageDraw.Draw(textlayer)
textsize = textdraw.textsize(inputtext, font=font)
- textpos = [im.size[i]-textsize[i]-margin[i] for i in [0,1]]
+ textpos = [im.size[i] - textsize[i] - margin[i] for i in [0, 1]]
textdraw.text(textpos, inputtext, font=font, fill=color)
if opacity != 1:
- textlayer = reduce_opacity(textlayer,opacity)
+ textlayer = reduce_opacity(textlayer, opacity)
return Image.composite(textlayer, im, textlayer)
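# Editor's note: a usage sketch for im_watermark(); with font=None, ImageDraw
# falls back to PIL's built-in default font.
_canvas = Image.new("RGB", (320, 240), (30, 30, 30))
_stamped = im_watermark(_canvas, "timeside", color=(255, 255, 255), opacity=.6)
# _stamped is a new RGBA composite; the input image is left untouched.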
def color_from_value(self, value):
""" given a value between 0 and 1, return an (r,g,b) tuple """
- return ImageColor.getrgb("hsl(%d,%d%%,%d%%)" % (int( (1.0 - value) * 360 ), 80, 50))
+ return ImageColor.getrgb("hsl(%d,%d%%,%d%%)" % (int((1.0 - value) * 360), 80, 50))
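# e.g. value 0.0 -> "hsl(360,80%,50%)" (red), 0.5 -> "hsl(180,80%,50%)" (cyan),
# so rising values sweep the hue wheel backwards from red.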
def mean(samples):
def normalize(contour):
- contour = contour-min(contour)
- return contour/max(contour)
+ contour = contour - min(contour)
+ return contour / max(contour)
class WaveformCentroid(Waveform):
+
""" Builds a PIL image representing a waveform of the audio stream.
    Peaks are colored relative to the spectral centroid of each frame buffer. """
implements(IGrapher)
@interfacedoc
- def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
- super(WaveformCentroid, self).__init__(width, height, bg_color, color_scheme)
+ def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+ super(WaveformCentroid, self).__init__(
+ width, height, bg_color, color_scheme)
self.lower_freq = 200
colors = default_color_schemes[color_scheme]['waveform']
self.color_lookup = interpolate_colors(colors)
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
- super(WaveformCentroid, self).setup(channels, samplerate, blocksize, totalframes)
+ super(WaveformCentroid, self).setup(
+ channels, samplerate, blocksize, totalframes)
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
- buffer = frames[:,0].copy()
- buffer.shape = (len(buffer),1)
+ buffer = frames[:, 0].copy()
+ buffer.shape = (len(buffer), 1)
for samples, end in self.pixels_adapter.process(buffer, eod):
if self.pixel_cursor < self.image_width:
- (spectral_centroid, db_spectrum) = self.spectrum.process(samples, True)
- line_color = self.color_lookup[int(spectral_centroid*255.0)]
- self.draw_peaks(self.pixel_cursor, peaks(samples), line_color)
+ (spectral_centroid, db_spectrum) = self.spectrum.process(
+ samples, True)
+ line_color = self.color_lookup[
+ int(spectral_centroid * 255.0)]
+ self.draw_peaks(
+ self.pixel_cursor, peaks(samples), line_color)
self.pixel_cursor += 1
return frames, eod
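# Editor's note: the lookup above maps the normalized centroid straight to a
# palette index; assuming the usual 256-entry palette from interpolate_colors(),
# a centroid of 0.0 picks entry 0, 0.5 picks entry 127 and 1.0 picks entry 255.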
class WaveformContourBlack(Waveform):
+
""" Builds a PIL image representing an amplitude coutour (envelop) of the audio stream.
"""
implements(IGrapher)
@interfacedoc
- def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
- super(WaveformContourBlack, self).__init__(width, height, bg_color, color_scheme)
+ def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+ super(WaveformContourBlack, self).__init__(
+ width, height, bg_color, color_scheme)
self.contour = numpy.zeros(self.image_width)
self.ndiv = 4
- self.x = numpy.r_[0:self.image_width-1:1]
+ self.x = numpy.r_[0:self.image_width - 1:1]
self.symetry = True
self.color_offset = 160
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
- super(WaveformContourBlack, self).setup(channels, samplerate, blocksize, totalframes)
+ super(WaveformContourBlack, self).setup(
+ channels, samplerate, blocksize, totalframes)
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
- buffer = frames[:,0].copy()
- buffer.shape = (len(buffer),1)
+ buffer = frames[:, 0].copy()
+ buffer.shape = (len(buffer), 1)
for samples, end in self.pixels_adapter.process(buffer, eod):
if self.pixel_cursor < self.image_width:
self.contour[self.pixel_cursor] = numpy.max(peaks(samples))
return frames, eod
-
class WaveformContourWhite(WaveformContourBlack):
""" Builds a PIL image representing an amplitude coutour (envelop) of the audio stream.
implements(IGrapher)
@interfacedoc
- def __init__(self, width=1024, height=256, bg_color=(255,255,255), color_scheme='default'):
- super(WaveformContourWhite, self).__init__(width, height, bg_color, color_scheme)
+ def __init__(self, width=1024, height=256, bg_color=(255, 255, 255), color_scheme='default'):
+ super(WaveformContourWhite, self).__init__(
+ width, height, bg_color, color_scheme)
self.color_offset = 60
@staticmethod
class Waveform(Grapher):
+
""" Builds a PIL image representing a simple waveform of the audio stream.
"""
implements(IGrapher)
@interfacedoc
- def __init__(self, width=1024, height=256, bg_color=(255,255,255), color_scheme='default'):
+ def __init__(self, width=1024, height=256, bg_color=(255, 255, 255), color_scheme='default'):
super(Waveform, self).__init__(width, height, bg_color, color_scheme)
- self.line_color = (0,0,0)
+ self.line_color = (0, 0, 0)
@staticmethod
@interfacedoc
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
- super(Waveform, self).setup(channels, samplerate, blocksize, totalframes)
+ super(Waveform, self).setup(
+ channels, samplerate, blocksize, totalframes)
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
if len(frames.shape) > 1:
- buffer = frames[:,0]
+ buffer = frames[:, 0]
else:
buffer = frames
- buffer.shape = (len(buffer),1)
+ buffer.shape = (len(buffer), 1)
for samples, end in self.pixels_adapter.process(buffer, eod):
- if self.pixel_cursor < self.image_width-1:
- self.draw_peaks(self.pixel_cursor, peaks(samples), self.line_color)
+ if self.pixel_cursor < self.image_width - 1:
+ self.draw_peaks(
+ self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
- if self.pixel_cursor == self.image_width-1:
- self.draw_peaks(self.pixel_cursor, peaks(samples), self.line_color)
+ if self.pixel_cursor == self.image_width - 1:
+ self.draw_peaks(
+ self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
return frames, eod
def post_process(self, output=None):
a = 1
for x in range(self.image_width):
- self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
-
+ self.pixel[x, self.image_height / 2] = tuple(
+ map(lambda p: p + a, self.pixel[x, self.image_height / 2]))
class WaveformTransparent(Waveform):
+
""" Builds a PIL image representing a transparent waveform of the audio stream.
"""
@interfacedoc
def __init__(self, width=1024, height=256, bg_color=None, color_scheme='default'):
- super(WaveformTransparent, self).__init__(width, height, bg_color, color_scheme)
- self.line_color = (255,255,255)
+ super(WaveformTransparent, self).__init__(
+ width, height, bg_color, color_scheme)
+ self.line_color = (255, 255, 255)
@staticmethod
@interfacedoc
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
- super(WaveformTransparent, self).setup(channels, samplerate, blocksize, totalframes)
+ super(WaveformTransparent, self).setup(
+ channels, samplerate, blocksize, totalframes)
@interfacedoc
def process(self, frames, eod=False):
if len(frames) != 1:
- buffer = frames[:,0]
- buffer.shape = (len(buffer),1)
+ buffer = frames[:, 0]
+ buffer.shape = (len(buffer), 1)
for samples, end in self.pixels_adapter.process(buffer, eod):
- if self.pixel_cursor < self.image_width-1:
- self.draw_peaks_inverted(self.pixel_cursor, peaks(samples), self.line_color)
+ if self.pixel_cursor < self.image_width - 1:
+ self.draw_peaks_inverted(
+ self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
- if self.pixel_cursor == self.image_width-1:
- self.draw_peaks_inverted(self.pixel_cursor, peaks(samples), self.line_color)
+ if self.pixel_cursor == self.image_width - 1:
+ self.draw_peaks_inverted(
+ self.pixel_cursor, peaks(samples), self.line_color)
self.pixel_cursor += 1
return frames, eod