From: Thomas Fillon
Date: Tue, 22 Apr 2014 13:29:13 +0000 (+0200)
Subject: Fix PEP8 on timeside/grapher/ with autopep8
X-Git-Tag: 0.5.5~1^2~38^2~5
X-Git-Url: https://git.parisson.com/?a=commitdiff_plain;h=52a21db3da68b25a323afa7523d3dbcdfd21b135;p=timeside.git

Fix PEP8 on timeside/grapher/ with autopep8
---

diff --git a/timeside/grapher/__init__.py b/timeside/grapher/__init__.py
index fcc7b2c..db44451 100644
--- a/timeside/grapher/__init__.py
+++ b/timeside/grapher/__init__.py
@@ -6,4 +6,4 @@ from waveform_transparent import *
 from waveform_contour import *
 from spectrogram_log import *
 from spectrogram_lin import *
-from render_analyzers import *
\ No newline at end of file
+from render_analyzers import *
diff --git a/timeside/grapher/color_schemes.py b/timeside/grapher/color_schemes.py
index 8f41d0c..1cabb00 100644
--- a/timeside/grapher/color_schemes.py
+++ b/timeside/grapher/color_schemes.py
@@ -2,23 +2,23 @@ default_color_schemes = {
     'default': {
-        'waveform': [(50,0,200), (0,220,80), (255,224,0), (255,0,0)],
-        'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
-                        (224,224,44), (255,60,30), (255,255,255)]
+        'waveform': [(50, 0, 200), (0, 220, 80), (255, 224, 0), (255, 0, 0)],
+        'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+                        (224, 224, 44), (255, 60, 30), (255, 255, 255)]
     },
     'iso': {
-        'waveform': [(0,0,255), (0,255,255), (255,255,0), (255,0,0)],
-        'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
-                        (224,224,44), (255,60,30), (255,255,255)]
+        'waveform': [(0, 0, 255), (0, 255, 255), (255, 255, 0), (255, 0, 0)],
+        'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+                        (224, 224, 44), (255, 60, 30), (255, 255, 255)]
     },
     'purple': {
-        'waveform': [(173,173,173), (147,149,196), (77,80,138), (108,66,0)],
-        'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
-                        (224,224,44), (255,60,30), (255,255,255)]
+        'waveform': [(173, 173, 173), (147, 149, 196), (77, 80, 138), (108, 66, 0)],
+        'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+                        (224, 224, 44), (255, 60, 30), (255, 255, 255)]
     },
     'awdio': {
-        'waveform': [(255,255,255), (255,255,255), (255,255,255), (255,255,255)],
-        'spectrogram': [(0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100),
-                        (224,224,44), (255,60,30), (255,255,255)]
+        'waveform': [(255, 255, 255), (255, 255, 255), (255, 255, 255), (255, 255, 255)],
+        'spectrogram': [(0, 0, 0), (58 / 4, 68 / 4, 65 / 4), (80 / 2, 100 / 2, 153 / 2), (90, 180, 100),
+                        (224, 224, 44), (255, 60, 30), (255, 255, 255)]
     },
 }
diff --git a/timeside/grapher/core.py b/timeside/grapher/core.py
index 1c92afa..8708827 100644
--- a/timeside/grapher/core.py
+++ b/timeside/grapher/core.py
@@ -22,12 +22,20 @@
 # Guillaume Pellerin
 
-import optparse, math, sys, numpy
+import optparse
+import math
+import sys
+import numpy
 
 try:
     from PIL import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
 except ImportError:
-    import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
+    import ImageFilter
+    import ImageChops
+    import Image
+    import ImageDraw
+    import ImageColor
+    import ImageEnhance
 
 from timeside.core import *
 from timeside.api import IGrapher
@@ -36,6 +44,7 @@ from utils import *
 
 
 class Spectrum(object):
+
     """ FFT based frequency analysis of audio frames."""
 
     def __init__(self, fft_size, samplerate, blocksize, totalframes, lower, higher,
                  window_function=None):
@@ -60,13 +69,12 @@ class Spectrum(object):
             self.window_function = numpy.hanning
             self.window = self.window_function(self.blocksize)
 
-
     def process(self, frames, eod, spec_range=120.0):
         """ Returns a tuple containing the spectral centroid and the spectrum (dB scales) of the input audio frames. FFT window sizes are adaptable to the input frame size."""
-        samples = frames[:,0]
-        nsamples = len(frames[:,0])
+        samples = frames[:, 0]
+        nsamples = len(frames[:, 0])
         if nsamples != self.blocksize:
             self.window = self.window_function(nsamples)
             samples *= self.window
@@ -74,11 +82,11 @@ class Spectrum(object):
         while nsamples > self.fft_size:
             self.fft_size = 2 * self.fft_size
 
-        zeros_p = numpy.zeros(self.fft_size/2-int(nsamples/2))
+        zeros_p = numpy.zeros(self.fft_size / 2 - int(nsamples / 2))
         if nsamples % 2:
-            zeros_n = numpy.zeros(self.fft_size/2-int(nsamples/2)-1)
+            zeros_n = numpy.zeros(self.fft_size / 2 - int(nsamples / 2) - 1)
         else:
-            zeros_n = numpy.zeros(self.fft_size/2-int(nsamples/2))
+            zeros_n = numpy.zeros(self.fft_size / 2 - int(nsamples / 2))
         samples = numpy.concatenate((zeros_p, samples, zeros_n), axis=0)
 
         fft = numpy.fft.fft(samples)
@@ -87,7 +95,8 @@ class Spectrum(object):
         length = numpy.float64(spectrum.shape[0])
 
         # scale the db spectrum from [- spec_range db ... 0 db] > [0..1]
-        db_spectrum = ((20*(numpy.log10(spectrum + 1e-30))).clip(-spec_range, 0.0) + spec_range)/spec_range
+        db_spectrum = ((20 * (numpy.log10(spectrum + 1e-30)))
+                       .clip(-spec_range, 0.0) + spec_range) / spec_range
 
         energy = spectrum.sum()
         spectral_centroid = 0
@@ -95,15 +104,18 @@ class Spectrum(object):
             # calculate the spectral centroid
             if self.spectrum_range == None:
                 self.spectrum_range = numpy.arange(length)
-            spectral_centroid = (spectrum * self.spectrum_range).sum() / (energy * (length - 1)) * self.samplerate * 0.5
+            spectral_centroid = (spectrum * self.spectrum_range).sum() / \
+                (energy * (length - 1)) * \
+                self.samplerate * 0.5
 
             # clip > log10 > scale between 0 and 1
-            spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - \
-                                 self.lower_log) / (self.higher_log - self.lower_log)
+            spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) -
+                                 self.lower_log) / (self.higher_log - self.lower_log)
 
         return (spectral_centroid, db_spectrum)
 
 
 class Grapher(Processor):
+
     ''' Generic abstract class for the graphers '''
@@ -140,18 +152,23 @@ class Grapher(Processor):
         self.color_color_scheme = color_scheme
 
     def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
-        super(Grapher, self).setup(channels, samplerate, blocksize, totalframes)
+        super(Grapher, self).setup(
+            channels, samplerate, blocksize, totalframes)
         self.sample_rate = samplerate
-        self.higher_freq = self.sample_rate/2
+        self.higher_freq = self.sample_rate / 2
         self.block_size = blocksize
         self.total_frames = totalframes
-        self.image = Image.new("RGBA", (self.image_width, self.image_height), self.bg_color)
+        self.image = Image.new(
+            "RGBA", (self.image_width, self.image_height), self.bg_color)
         self.samples_per_pixel = self.total_frames / float(self.image_width)
         self.buffer_size = int(round(self.samples_per_pixel, 0))
-        self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
-        self.pixels_adapter_totalframes = self.pixels_adapter.blocksize(self.total_frames)
-        self.spectrum = Spectrum(self.fft_size, self.sample_rate, self.block_size, self.total_frames,
-                                 self.lower_freq, self.higher_freq, numpy.hanning)
+        self.pixels_adapter = FixedSizeInputAdapter(
+            self.buffer_size, 1, pad=False)
+        self.pixels_adapter_totalframes = self.pixels_adapter.blocksize(
+            self.total_frames)
+        self.spectrum = Spectrum(
+            self.fft_size, self.sample_rate, self.block_size, self.total_frames,
+            self.lower_freq, self.higher_freq, numpy.hanning)
 
         self.pixel = self.image.load()
         self.draw = ImageDraw.Draw(self.image)
@@ -166,8 +183,9 @@ class Grapher(Processor):
             return
         return self.image
 
-    def watermark(self, text, font=None, color=(255, 255, 255), opacity=.6, margin=(5,5)):
-        self.image = im_watermark(self.image, text, color=color, opacity=opacity, margin=margin)
+    def watermark(self, text, font=None, color=(255, 255, 255), opacity=.6, margin=(5, 5)):
+        self.image = im_watermark(
+            self.image, text, color=color, opacity=opacity, margin=margin)
 
     def draw_peaks(self, x, peaks, line_color):
         """Draw 2 peaks at x"""
@@ -176,7 +194,8 @@ class Grapher(Processor):
         y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
 
         if self.previous_y:
-            self.draw.line([self.previous_x, self.previous_y, x, y1, x, y2], line_color)
+            self.draw.line(
+                [self.previous_x, self.previous_y, x, y1, x, y2], line_color)
         else:
             self.draw.line([x, y1, x, y2], line_color)
@@ -189,13 +208,13 @@ class Grapher(Processor):
         y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
         y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
 
-        if self.previous_y and x < self.image_width-1:
+        if self.previous_y and x < self.image_width - 1:
             if y1 < y2:
                 self.draw.line((x, 0, x, y1), line_color)
-                self.draw.line((x, self.image_height , x, y2), line_color)
+                self.draw.line((x, self.image_height, x, y2), line_color)
             else:
                 self.draw.line((x, 0, x, y2), line_color)
-                self.draw.line((x, self.image_height , x, y1), line_color)
+                self.draw.line((x, self.image_height, x, y1), line_color)
         else:
             self.draw.line((x, 0, x, self.image_height), line_color)
         self.draw_anti_aliased_pixels(x, y1, y2, line_color)
@@ -210,10 +229,10 @@ class Grapher(Processor):
         if alpha > 0.0 and alpha < 1.0 and y_max_int + 1 < self.image_height:
             current_pix = self.pixel[int(x), y_max_int + 1]
-            r = int((1-alpha)*current_pix[0] + alpha*color[0])
-            g = int((1-alpha)*current_pix[1] + alpha*color[1])
-            b = int((1-alpha)*current_pix[2] + alpha*color[2])
-            self.pixel[x, y_max_int + 1] = (r,g,b)
+            r = int((1 - alpha) * current_pix[0] + alpha * color[0])
+            g = int((1 - alpha) * current_pix[1] + alpha * color[1])
+            b = int((1 - alpha) * current_pix[2] + alpha * color[2])
+            self.pixel[x, y_max_int + 1] = (r, g, b)
 
         y_min = min(y1, y2)
         y_min_int = int(y_min)
@@ -221,10 +240,10 @@ class Grapher(Processor):
 
         if alpha > 0.0 and alpha < 1.0 and y_min_int - 1 >= 0:
             current_pix = self.pixel[x, y_min_int - 1]
-            r = int((1-alpha)*current_pix[0] + alpha*color[0])
-            g = int((1-alpha)*current_pix[1] + alpha*color[1])
-            b = int((1-alpha)*current_pix[2] + alpha*color[2])
-            self.pixel[x, y_min_int - 1] = (r,g,b)
+            r = int((1 - alpha) * current_pix[0] + alpha * color[0])
+            g = int((1 - alpha) * current_pix[1] + alpha * color[1])
+            b = int((1 - alpha) * current_pix[2] + alpha * color[2])
+            self.pixel[x, y_min_int - 1] = (r, g, b)
 
     def draw_peaks_contour(self):
         contour = self.contour.copy()
@@ -234,36 +253,37 @@ class Grapher(Processor):
         # Scaling
         #ratio = numpy.mean(contour)/numpy.sqrt(2)
         ratio = 1
-        contour = normalize(numpy.expm1(contour/ratio))*(1-10**-6)
+        contour = normalize(numpy.expm1(contour / ratio)) * (1 - 10 ** -6)
 
         # Spline
         #contour = cspline1d(contour)
         #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
 
         if self.symetry:
-            height = int(self.image_height/2)
+            height = int(self.image_height / 2)
         else:
             height = self.image_height
 
         # Multicurve rotating
-        for i in range(0,self.ndiv):
+        for i in range(0, self.ndiv):
             self.previous_x, self.previous_y = None, None
 
-            bright_color = int(255*(1-float(i)/(self.ndiv*2)))
-            bright_color = 255-bright_color+self.color_offset
+            bright_color = int(255 * (1 - float(i) / (self.ndiv * 2)))
+            bright_color = 255 - bright_color + self.color_offset
             #line_color = self.color_lookup[int(self.centroids[j]*255.0)]
-            line_color = (bright_color,bright_color,bright_color)
+            line_color = (bright_color, bright_color, bright_color)
 
             # Linear
             #contour = contour*(1.0-float(i)/self.ndiv)
             #contour = contour*(1-float(i)/self.ndiv)
 
             # Cosinus
-            contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
+            contour = contour * \
+                numpy.arccos(float(i) / self.ndiv) * 2 / numpy.pi
            #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
             #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
 
-            curve = (height-1)*contour
+            curve = (height - 1) * contour
             #curve = contour*(height-2)/2+height/2
             for x in self.x:
@@ -271,18 +291,23 @@ class Grapher(Processor):
                 y = curve[x]
                 if not x == 0:
                     if not self.symetry:
-                        self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
+                        self.draw.line(
+                            [self.previous_x, self.previous_y, x, y], line_color)
                         self.draw_anti_aliased_pixels(x, y, y, line_color)
                     else:
-                        self.draw.line([self.previous_x, self.previous_y+height, x, y+height], line_color)
-                        self.draw_anti_aliased_pixels(x, y+height, y+height, line_color)
-                        self.draw.line([self.previous_x, -self.previous_y+height, x, -y+height], line_color)
-                        self.draw_anti_aliased_pixels(x, -y+height, -y+height, line_color)
+                        self.draw.line(
+                            [self.previous_x, self.previous_y + height, x, y + height], line_color)
+                        self.draw_anti_aliased_pixels(
+                            x, y + height, y + height, line_color)
+                        self.draw.line(
+                            [self.previous_x, -self.previous_y + height, x, -y + height], line_color)
+                        self.draw_anti_aliased_pixels(
+                            x, -y + height, -y + height, line_color)
                 else:
                     if not self.symetry:
                         self.draw.point((x, y), line_color)
                     else:
-                        self.draw.point((x, y+height), line_color)
+                        self.draw.point((x, y + height), line_color)
                 self.previous_x, self.previous_y = x, y
diff --git a/timeside/grapher/render_analyzers.py b/timeside/grapher/render_analyzers.py
index 70caa58..a100d83 100644
--- a/timeside/grapher/render_analyzers.py
+++ b/timeside/grapher/render_analyzers.py
@@ -27,6 +27,7 @@ from timeside import analyzer
 
 
 class DisplayAnalyzer(Grapher):
+
     """ Builds a PIL image from analyzer result
     This is an Abstract base class
@@ -73,7 +74,8 @@ class DisplayAnalyzer(Grapher):
                                              color_scheme)
         self.parents.append(analyzer)
-        self._result_id = result_id # TODO : make it generic when analyzer will be "atomize"
+        # TODO : make it generic when analyzer will be "atomize"
+        self._result_id = result_id
 
     @staticmethod
     @interfacedoc
@@ -87,7 +89,7 @@ class DisplayAnalyzer(Grapher):
             __doc__ = """Builds a PIL image representing """ + grapher_name
 
-        NewGrapher.__name__ = 'Display'+result_id
+        NewGrapher.__name__ = 'Display' + result_id
 
         return NewGrapher
@@ -108,9 +110,9 @@ DisplayOnsetDetectionFunction = DisplayAnalyzer.create(analyzer=odf,
                                                        grapher_name='Onset detection function')
 wav = analyzer.Waveform()
 DisplayWaveform = DisplayAnalyzer.create(analyzer=wav,
-                                        result_id='waveform_analyzer',
-                                        grapher_id='grapher_waveform',
-                                        grapher_name='Waveform from Analyzer')
+                                         result_id='waveform_analyzer',
+                                         grapher_id='grapher_waveform',
+                                         grapher_name='Waveform from Analyzer')
 irit4hz = analyzer.IRITSpeech4Hz()
 Display4hzSpeechSegmentation = DisplayAnalyzer.create(analyzer=irit4hz,
                                                       result_id='irit_speech_4hz.segments',
diff --git a/timeside/grapher/spectrogram_lin.py b/timeside/grapher/spectrogram_lin.py
index dbf84bd..6e51268 100644
--- a/timeside/grapher/spectrogram_lin.py
+++ b/timeside/grapher/spectrogram_lin.py
@@ -26,14 +26,16 @@ from timeside.grapher.spectrogram_log import SpectrogramLog
 
 
 class SpectrogramLinear(SpectrogramLog):
+
     """ Builds a PIL image representing a spectrogram of the audio stream
     (level vs. frequency vs. time).
     Adds pixels iteratively thanks to the adapter providing fixed size frame buffers."""
 
     implements(IGrapher)
 
     @interfacedoc
-    def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
-        super(SpectrogramLinear, self).__init__(width, height, bg_color, color_scheme)
+    def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+        super(SpectrogramLinear, self).__init__(
+            width, height, bg_color, color_scheme)
@@ -47,7 +49,8 @@ class SpectrogramLinear(SpectrogramLog):
 
     @interfacedoc
     def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
-        super(SpectrogramLinear, self).setup(channels, samplerate, blocksize, totalframes)
+        super(SpectrogramLinear, self).setup(
+            channels, samplerate, blocksize, totalframes)
 
     def set_scale(self):
         """generate the lookup which translates y-coordinate to fft-bin"""
@@ -57,8 +60,8 @@ class SpectrogramLinear(SpectrogramLog):
         y_min = f_min
         y_max = f_max
         for y in range(self.image_height):
-            freq = y_min + y / (self.image_height - 1.0) *(y_max - y_min)
-            fft_bin = freq / f_max * (self.fft_size/2 + 1)
-            if fft_bin < self.fft_size/2:
+            freq = y_min + y / (self.image_height - 1.0) * (y_max - y_min)
+            fft_bin = freq / f_max * (self.fft_size / 2 + 1)
+            if fft_bin < self.fft_size / 2:
                 alpha = fft_bin - int(fft_bin)
                 self.y_to_bin.append((int(fft_bin), alpha * 255))
diff --git a/timeside/grapher/spectrogram_log.py b/timeside/grapher/spectrogram_log.py
index a71dde9..b3a8e96 100644
--- a/timeside/grapher/spectrogram_log.py
+++ b/timeside/grapher/spectrogram_log.py
@@ -25,14 +25,16 @@ from timeside.grapher.core import *
 
 
 class SpectrogramLog(Grapher):
+
     """ Builds a PIL image representing a spectrogram of the audio stream
     (level vs. frequency vs. time).
     Adds pixels iteratively thanks to the adapter providing fixed size frame buffers."""
 
     implements(IGrapher)
 
     @interfacedoc
-    def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
-        super(SpectrogramLog, self).__init__(width, height, bg_color, color_scheme)
+    def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+        super(SpectrogramLog, self).__init__(
+            width, height, bg_color, color_scheme)
         self.lower_freq = 100
         self.colors = default_color_schemes[color_scheme]['spectrogram']
         self.pixels = []
@@ -50,7 +52,8 @@ class SpectrogramLog(Grapher):
 
     @interfacedoc
     def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
-        super(SpectrogramLog, self).setup(channels, samplerate, blocksize, totalframes)
+        super(SpectrogramLog, self).setup(
+            channels, samplerate, blocksize, totalframes)
         self.image = self.image.convert("P")
         self.image = self.image.transpose(Image.ROTATE_90)
         self.image.putpalette(interpolate_colors(self.colors, True))
@@ -64,26 +67,29 @@ class SpectrogramLog(Grapher):
         y_min = math.log10(f_min)
         y_max = math.log10(f_max)
         for y in range(self.image_height):
-            freq = math.pow(10.0, y_min + y / (self.image_height - 1.0) *(y_max - y_min))
-            fft_bin = freq / f_max * (self.fft_size/2 + 1)
-            if fft_bin < self.fft_size/2:
+            freq = math.pow(
+                10.0, y_min + y / (self.image_height - 1.0) * (y_max - y_min))
+            fft_bin = freq / f_max * (self.fft_size / 2 + 1)
+            if fft_bin < self.fft_size / 2:
                 alpha = fft_bin - int(fft_bin)
                 self.y_to_bin.append((int(fft_bin), alpha * 255))
 
     def draw_spectrum(self, x, spectrum):
         for (index, alpha) in self.y_to_bin:
-            self.pixels.append( int( ((255.0-alpha) * spectrum[index] + alpha * spectrum[index + 1] )) )
+            self.pixels.append(
+                int(((255.0 - alpha) * spectrum[index] + alpha * spectrum[index + 1])))
         for y in range(len(self.y_to_bin), self.image_height):
             self.pixels.append(0)
 
     @interfacedoc
     def process(self, frames, eod=False):
         if len(frames) != 1:
-            chunk = frames[:,0].copy()
-            chunk.shape = (len(chunk),1)
+            chunk = frames[:, 0].copy()
+            chunk.shape = (len(chunk), 1)
             for samples, end in self.pixels_adapter.process(chunk, eod):
                 if self.pixel_cursor < self.image_width:
-                    (spectral_centroid, db_spectrum) = self.spectrum.process(samples, True)
+                    (spectral_centroid, db_spectrum) = self.spectrum.process(
+                        samples, True)
                     self.draw_spectrum(self.pixel_cursor, db_spectrum)
                     self.pixel_cursor += 1
         return frames, eod
@@ -93,4 +99,3 @@ class SpectrogramLog(Grapher):
         """ Apply last 2D transforms"""
         self.image.putdata(self.pixels)
         self.image = self.image.transpose(Image.ROTATE_90)
-
diff --git a/timeside/grapher/utils.py b/timeside/grapher/utils.py
index d09aeb5..e3c525c 100644
--- a/timeside/grapher/utils.py
+++ b/timeside/grapher/utils.py
@@ -28,7 +28,12 @@
 try:
     from PIL import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
 except ImportError:
-    import ImageFilter, ImageChops, Image, ImageDraw, ImageColor, ImageEnhance
+    import ImageFilter
+    import ImageChops
+    import Image
+    import ImageDraw
+    import ImageColor
+    import ImageEnhance
 
 import numpy
@@ -42,14 +47,17 @@ def interpolate_colors(colors, flat=False, num_colors=256):
     palette = []
 
     for i in range(num_colors):
-        index = (i * (len(colors) - 1))/(num_colors - 1.0)
+        index = (i * (len(colors) - 1)) / (num_colors - 1.0)
         index_int = int(index)
         alpha = index - float(index_int)
 
         if alpha > 0:
-            r = (1.0 - alpha) * colors[index_int][0] + alpha * colors[index_int + 1][0]
-            g = (1.0 - alpha) * colors[index_int][1] + alpha * colors[index_int + 1][1]
-            b = (1.0 - alpha) * colors[index_int][2] + alpha * colors[index_int + 1][2]
+            r = (1.0 - alpha) * colors[index_int][
+                0] + alpha * colors[index_int + 1][0]
+            g = (1.0 - alpha) * colors[index_int][
+                1] + alpha * colors[index_int + 1][1]
+            b = (1.0 - alpha) * colors[index_int][
+                2] + alpha * colors[index_int + 1][2]
         else:
             r = (1.0 - alpha) * colors[index_int][0]
             g = (1.0 - alpha) * colors[index_int][1]
@@ -71,7 +79,7 @@ def downsample(vector, factor):
     if (len(vector) % factor):
         print "Length of 'vector' is not divisible by 'factor'=%d!" % factor
         return 0
-    vector.shape = (len(vector)/factor, factor)
+    vector.shape = (len(vector) / factor, factor)
     return numpy.mean(vector, axis=1)
@@ -122,7 +130,8 @@ def smooth(x, window_len=10, window='hanning'):
     >>> plt.show() # doctest: +SKIP
     """
-    # TODO: the window parameter could be the window itself if an array instead of a string
+    # TODO: the window parameter could be the window itself if an array
+    # instead of a string
 
     if x.ndim != 1:
         raise ValueError, "smooth only accepts 1 dimension arrays."
@@ -133,15 +142,16 @@ def smooth(x, window_len=10, window='hanning'):
     if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
         raise ValueError, "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
 
-    s = numpy.r_[2*x[0]-x[window_len:1:-1], x, 2*x[-1]-x[-1:-window_len:-1]]
+    s = numpy.r_[2 * x[0] - x[window_len:1:-1],
+                 x, 2 * x[-1] - x[-1:-window_len:-1]]
 
-    if window == 'flat': #moving average
-        w = numpy.ones(window_len,'d')
+    if window == 'flat':  # moving average
+        w = numpy.ones(window_len, 'd')
     else:
         w = getattr(numpy, window)(window_len)
 
-    y = numpy.convolve(w/w.sum(), s, mode='same')
-    return y[window_len-1:-window_len+1]
+    y = numpy.convolve(w / w.sum(), s, mode='same')
+    return y[window_len - 1:-window_len + 1]
@@ -157,17 +167,17 @@ def reduce_opacity(im, opacity):
     return im
 
 
-def im_watermark(im, inputtext, font=None, color=None, opacity=.6, margin=(30,30)):
+def im_watermark(im, inputtext, font=None, color=None, opacity=.6, margin=(30, 30)):
     """imprints a PIL image with the indicated text in lower-right corner"""
     if im.mode != "RGBA":
         im = im.convert("RGBA")
-    textlayer = Image.new("RGBA", im.size, (0,0,0,0))
+    textlayer = Image.new("RGBA", im.size, (0, 0, 0, 0))
     textdraw = ImageDraw.Draw(textlayer)
     textsize = textdraw.textsize(inputtext, font=font)
-    textpos = [im.size[i]-textsize[i]-margin[i] for i in [0,1]]
+    textpos = [im.size[i] - textsize[i] - margin[i] for i in [0, 1]]
     textdraw.text(textpos, inputtext, font=font, fill=color)
     if opacity != 1:
-        textlayer = reduce_opacity(textlayer,opacity)
+        textlayer = reduce_opacity(textlayer, opacity)
     return Image.composite(textlayer, im, textlayer)
@@ -189,7 +199,7 @@ def peaks(samples):
 
 def color_from_value(self, value):
     """ given a value between 0 and 1, return an (r,g,b) tuple """
-    return ImageColor.getrgb("hsl(%d,%d%%,%d%%)" % (int( (1.0 - value) * 360 ), 80, 50))
+    return ImageColor.getrgb("hsl(%d,%d%%,%d%%)" % (int((1.0 - value) * 360), 80, 50))
 
 
 def mean(samples):
@@ -197,5 +207,5 @@ def mean(samples):
 
 
 def normalize(contour):
-    contour = contour-min(contour)
-    return contour/max(contour)
+    contour = contour - min(contour)
+    return contour / max(contour)
diff --git a/timeside/grapher/waveform_centroid.py b/timeside/grapher/waveform_centroid.py
index 6746dfc..dca091a 100644
--- a/timeside/grapher/waveform_centroid.py
+++ b/timeside/grapher/waveform_centroid.py
@@ -26,14 +26,16 @@ from timeside.grapher.waveform_simple import Waveform
 
 
 class WaveformCentroid(Waveform):
+
     """ Builds a PIL image representing a waveform of the audio stream.
     Peaks are colored relatively to the spectral centroids of each frame buffer.
     """
 
     implements(IGrapher)
 
     @interfacedoc
-    def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
-        super(WaveformCentroid, self).__init__(width, height, bg_color, color_scheme)
+    def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+        super(WaveformCentroid, self).__init__(
+            width, height, bg_color, color_scheme)
         self.lower_freq = 200
         colors = default_color_schemes[color_scheme]['waveform']
         self.color_lookup = interpolate_colors(colors)
@@ -50,17 +52,21 @@ class WaveformCentroid(Waveform):
 
     @interfacedoc
     def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
-        super(WaveformCentroid, self).setup(channels, samplerate, blocksize, totalframes)
+        super(WaveformCentroid, self).setup(
+            channels, samplerate, blocksize, totalframes)
 
     @interfacedoc
    def process(self, frames, eod=False):
         if len(frames) != 1:
-            buffer = frames[:,0].copy()
-            buffer.shape = (len(buffer),1)
+            buffer = frames[:, 0].copy()
+            buffer.shape = (len(buffer), 1)
             for samples, end in self.pixels_adapter.process(buffer, eod):
                 if self.pixel_cursor < self.image_width:
-                    (spectral_centroid, db_spectrum) = self.spectrum.process(samples, True)
-                    line_color = self.color_lookup[int(spectral_centroid*255.0)]
-                    self.draw_peaks(self.pixel_cursor, peaks(samples), line_color)
+                    (spectral_centroid, db_spectrum) = self.spectrum.process(
+                        samples, True)
+                    line_color = self.color_lookup[
+                        int(spectral_centroid * 255.0)]
+                    self.draw_peaks(
+                        self.pixel_cursor, peaks(samples), line_color)
                     self.pixel_cursor += 1
         return frames, eod
diff --git a/timeside/grapher/waveform_contour.py b/timeside/grapher/waveform_contour.py
index 6fe2e38..1237dde 100644
--- a/timeside/grapher/waveform_contour.py
+++ b/timeside/grapher/waveform_contour.py
@@ -26,17 +26,19 @@ from timeside.grapher.waveform_simple import Waveform
 
 
 class WaveformContourBlack(Waveform):
+
     """ Builds a PIL image representing an amplitude contour (envelope) of the audio stream.
     """
 
     implements(IGrapher)
 
     @interfacedoc
-    def __init__(self, width=1024, height=256, bg_color=(0,0,0), color_scheme='default'):
-        super(WaveformContourBlack, self).__init__(width, height, bg_color, color_scheme)
+    def __init__(self, width=1024, height=256, bg_color=(0, 0, 0), color_scheme='default'):
+        super(WaveformContourBlack, self).__init__(
+            width, height, bg_color, color_scheme)
         self.contour = numpy.zeros(self.image_width)
         self.ndiv = 4
-        self.x = numpy.r_[0:self.image_width-1:1]
+        self.x = numpy.r_[0:self.image_width - 1:1]
         self.symetry = True
         self.color_offset = 160
@@ -52,13 +54,14 @@ class WaveformContourBlack(Waveform):
 
     @interfacedoc
     def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
-        super(WaveformContourBlack, self).setup(channels, samplerate, blocksize, totalframes)
+        super(WaveformContourBlack, self).setup(
+            channels, samplerate, blocksize, totalframes)
 
     @interfacedoc
     def process(self, frames, eod=False):
         if len(frames) != 1:
-            buffer = frames[:,0].copy()
-            buffer.shape = (len(buffer),1)
+            buffer = frames[:, 0].copy()
+            buffer.shape = (len(buffer), 1)
             for samples, end in self.pixels_adapter.process(buffer, eod):
                 if self.pixel_cursor < self.image_width:
                     self.contour[self.pixel_cursor] = numpy.max(peaks(samples))
@@ -68,7 +71,6 @@ class WaveformContourBlack(Waveform):
 
         return frames, eod
 
-
 class WaveformContourWhite(WaveformContourBlack):
     """ Builds a PIL image representing an amplitude contour (envelope) of the audio stream.
@@ -77,8 +79,9 @@ class WaveformContourWhite(WaveformContourBlack):
     """
     implements(IGrapher)
 
     @interfacedoc
-    def __init__(self, width=1024, height=256, bg_color=(255,255,255), color_scheme='default'):
-        super(WaveformContourWhite, self).__init__(width, height, bg_color, color_scheme)
+    def __init__(self, width=1024, height=256, bg_color=(255, 255, 255), color_scheme='default'):
+        super(WaveformContourWhite, self).__init__(
+            width, height, bg_color, color_scheme)
         self.color_offset = 60
 
     @staticmethod
diff --git a/timeside/grapher/waveform_simple.py b/timeside/grapher/waveform_simple.py
index 6dd7916..c918f8b 100644
--- a/timeside/grapher/waveform_simple.py
+++ b/timeside/grapher/waveform_simple.py
@@ -25,15 +25,16 @@ from timeside.grapher.core import *
 
 
 class Waveform(Grapher):
+
     """ Builds a PIL image representing a simple waveform of the audio stream.
""" implements(IGrapher) @interfacedoc - def __init__(self, width=1024, height=256, bg_color=(255,255,255), color_scheme='default'): + def __init__(self, width=1024, height=256, bg_color=(255, 255, 255), color_scheme='default'): super(Waveform, self).__init__(width, height, bg_color, color_scheme) - self.line_color = (0,0,0) + self.line_color = (0, 0, 0) @staticmethod @interfacedoc @@ -47,22 +48,25 @@ class Waveform(Grapher): @interfacedoc def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None): - super(Waveform, self).setup(channels, samplerate, blocksize, totalframes) + super(Waveform, self).setup( + channels, samplerate, blocksize, totalframes) @interfacedoc def process(self, frames, eod=False): if len(frames) != 1: if len(frames.shape) > 1: - buffer = frames[:,0] + buffer = frames[:, 0] else: buffer = frames - buffer.shape = (len(buffer),1) + buffer.shape = (len(buffer), 1) for samples, end in self.pixels_adapter.process(buffer, eod): - if self.pixel_cursor < self.image_width-1: - self.draw_peaks(self.pixel_cursor, peaks(samples), self.line_color) + if self.pixel_cursor < self.image_width - 1: + self.draw_peaks( + self.pixel_cursor, peaks(samples), self.line_color) self.pixel_cursor += 1 - if self.pixel_cursor == self.image_width-1: - self.draw_peaks(self.pixel_cursor, peaks(samples), self.line_color) + if self.pixel_cursor == self.image_width - 1: + self.draw_peaks( + self.pixel_cursor, peaks(samples), self.line_color) self.pixel_cursor += 1 return frames, eod @@ -70,5 +74,5 @@ class Waveform(Grapher): def post_process(self, output=None): a = 1 for x in range(self.image_width): - self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2])) - + self.pixel[x, self.image_height / 2] = tuple( + map(lambda p: p + a, self.pixel[x, self.image_height / 2])) diff --git a/timeside/grapher/waveform_transparent.py b/timeside/grapher/waveform_transparent.py index 2c19f6e..1c4e9a5 100644 --- a/timeside/grapher/waveform_transparent.py +++ b/timeside/grapher/waveform_transparent.py @@ -26,6 +26,7 @@ from timeside.grapher.waveform_simple import Waveform class WaveformTransparent(Waveform): + """ Builds a PIL image representing a transparent waveform of the audio stream. 
""" @@ -33,8 +34,9 @@ class WaveformTransparent(Waveform): @interfacedoc def __init__(self, width=1024, height=256, bg_color=None, color_scheme='default'): - super(WaveformTransparent, self).__init__(width, height, bg_color, color_scheme) - self.line_color = (255,255,255) + super(WaveformTransparent, self).__init__( + width, height, bg_color, color_scheme) + self.line_color = (255, 255, 255) @staticmethod @interfacedoc @@ -48,18 +50,21 @@ class WaveformTransparent(Waveform): @interfacedoc def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None): - super(WaveformTransparent, self).setup(channels, samplerate, blocksize, totalframes) + super(WaveformTransparent, self).setup( + channels, samplerate, blocksize, totalframes) @interfacedoc def process(self, frames, eod=False): if len(frames) != 1: - buffer = frames[:,0] - buffer.shape = (len(buffer),1) + buffer = frames[:, 0] + buffer.shape = (len(buffer), 1) for samples, end in self.pixels_adapter.process(buffer, eod): - if self.pixel_cursor < self.image_width-1: - self.draw_peaks_inverted(self.pixel_cursor, peaks(samples), self.line_color) + if self.pixel_cursor < self.image_width - 1: + self.draw_peaks_inverted( + self.pixel_cursor, peaks(samples), self.line_color) self.pixel_cursor += 1 - if self.pixel_cursor == self.image_width-1: - self.draw_peaks_inverted(self.pixel_cursor, peaks(samples), self.line_color) + if self.pixel_cursor == self.image_width - 1: + self.draw_peaks_inverted( + self.pixel_cursor, peaks(samples), self.line_color) self.pixel_cursor += 1 return frames, eod