From: yomguy
Date: Sat, 16 Oct 2010 07:45:25 +0000 (+0000)
Subject: rearrange waveform objects, fix batch background big bug
X-Git-Tag: 0.3.2~100
X-Git-Url: https://git.parisson.com/?a=commitdiff_plain;h=5d48e82fbb9808929676690bc53666a1823f02b6;p=timeside.git

rearrange waveform objects, fix batch background big bug
---

diff --git a/timeside/grapher/core.py b/timeside/grapher/core.py
index 48f46d8..8acd28e 100644
--- a/timeside/grapher/core.py
+++ b/timeside/grapher/core.py
@@ -28,7 +28,6 @@ import ImageFilter, ImageChops, Image, ImageDraw, ImageColor
 import numpy
 from timeside.core import FixedSizeInputAdapter
 
-
 default_color_schemes = {
     'default': {
         'waveform': [(50,0,200), (0,220,80), (255,224,0), (255,0,0)],
@@ -251,6 +250,117 @@ class WaveformImage(object):
         self.image.save(filename)
 
 
+class WaveformImageJoyContour(WaveformImage):
+
+    def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, ndiv=1, symetry=None):
+        WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme)
+        self.contour = numpy.zeros(self.image_width)
+        self.centroids = numpy.zeros(self.image_width)
+        self.ndiv = ndiv
+        self.x = numpy.r_[0:self.image_width-1:1]
+        self.dx1 = self.x[1]-self.x[0]
+        self.symetry = symetry
+
+    def get_peaks_contour(self, x, peaks, spectral_centroid=None):
+        self.contour[x] = numpy.max(peaks)
+        self.centroids[x] = spectral_centroid
+
+    def mean(self, samples):
+        return numpy.mean(samples)
+
+    def normalize(self, contour):
+        contour = contour-min(contour)
+        return contour/max(contour)
+
+    def draw_peaks_contour(self):
+        contour = self.contour.copy()
+
+        # Smoothing
+        contour = smooth(contour, window_len=16)
+
+        # Normalize
+        contour = self.normalize(contour)
+
+        # Scaling
+        #ratio = numpy.mean(contour)/numpy.sqrt(2)
+        ratio = 1
+        contour = self.normalize(numpy.expm1(contour/ratio))*(1-10**-6)
+
+        # Spline
+        #contour = cspline1d(contour)
+        #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
+
+        if self.symetry:
+            height = int(self.image_height/2)
+        else:
+            height = self.image_height
+
+        # Multicurve rotating
+        for i in range(0,self.ndiv):
+            self.previous_x, self.previous_y = None, None
+
+            #bright_color = 255
+            bright_color = int(255*(1-float(i)/(self.ndiv*2)))
+            bright_color = 255-bright_color+160
+            #line_color = self.color_lookup[int(self.centroids[j]*255.0)]
+            line_color = (bright_color,bright_color,bright_color)
+
+            # Linear
+            #contour = contour*(1.0-float(i)/self.ndiv)
+            #contour = contour*(1-float(i)/self.ndiv)
+
+            # Cosine
+            contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
+            #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
+
+            # Negative Sine
+            #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
+
+            curve = (height-1)*contour
+            #curve = contour*(height-2)/2+height/2
+
+            for x in self.x:
+                x = int(x)
+                y = curve[x]
+                if not x == 0:
+                    if not self.symetry:
+                        self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
+                        self.draw_anti_aliased_pixels(x, y, y, line_color)
+                    else:
+                        self.draw.line([self.previous_x, self.previous_y+height, x, y+height], line_color)
+                        self.draw_anti_aliased_pixels(x, y+height, y+height, line_color)
+                        self.draw.line([self.previous_x, -self.previous_y+height, x, -y+height], line_color)
+                        self.draw_anti_aliased_pixels(x, -y+height, -y+height, line_color)
+                else:
+                    if not self.symetry:
+                        self.draw.point((x, y), line_color)
+                    else:
+                        self.draw.point((x, y+height), line_color)
+                self.previous_x, self.previous_y = x, y
+
+    def process(self, frames, eod):
+        if len(frames) != 1:
+            buffer = frames[:,0].copy()
+            buffer.shape = (len(buffer),1)
+            for samples, end in self.pixels_adapter.process(buffer, eod):
+                if self.pixel_cursor < self.image_width:
+                    #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True)
+                    peaks = self.peaks(samples)
+                    self.get_peaks_contour(self.pixel_cursor, peaks)
+                    self.pixel_cursor += 1
+        if eod:
+            self.draw_peaks_contour()
+
+    def save(self, filename):
+        """ Apply last 2D transforms and write all pixels to the file. """
+        # middle line (0 for none)
+        a = 1
+        for x in range(self.image_width):
+            self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
+        #self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
+        self.image.save(filename)
+
+
 class WaveformImageSimple(object):
     """ Builds a PIL image representing a waveform of the audio stream.
         Adds pixels iteratively thanks to the adapter providing fixed size frame buffers.
@@ -283,6 +393,10 @@ class WaveformImageSimple(object):
         self.frame_cursor = 0
         self.pixel_cursor = 0
 
+    def normalize(self, contour):
+        contour = contour-min(contour)
+        return contour/max(contour)
+
     def peaks(self, samples):
         """ Find the minimum and maximum peak of the samples.
         Returns that pair in the order they were found.
@@ -305,7 +419,7 @@ class WaveformImageSimple(object):
         y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
         y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
 
-        if self.previous_y and x < self.image_width-2 and self.pixel_cursor % 2:
+        if self.previous_y and x < self.image_width-1 and self.pixel_cursor % 2:
             if y1 < y2:
                 self.draw.line((x, 0, x, y1), self.line_color)
                 self.draw.line((x, self.image_height , x, y2), self.line_color)
@@ -317,8 +431,6 @@ class WaveformImageSimple(object):
 
         self.previous_x, self.previous_y = x, y1
 
-#        self.draw_anti_aliased_pixels(x, y1, y2, self.line_color)
-
     def draw_anti_aliased_pixels(self, x, y1, y2, color):
         """ vertical anti-aliasing at y1 and y2 """
 
@@ -353,14 +465,14 @@ class WaveformImageSimple(object):
             buffer = frames[:,0].copy()
             buffer.shape = (len(buffer),1)
             for samples, end in self.pixels_adapter.process(buffer, eod):
-                if self.pixel_cursor < self.image_width:
+                if self.pixel_cursor < self.image_width-1:
                     self.draw_peaks(self.pixel_cursor, self.peaks(samples))
                     self.pixel_cursor += 1
-                if self.pixel_cursor == self.image_width-2:
+                if self.pixel_cursor == self.image_width-1:
                     self.draw_peaks(self.pixel_cursor, (0, 0))
                     self.pixel_cursor += 1
-                else:
-                    pass
+#                else:
+#                    pass
 
     def save(self, filename):
         """ Apply last 2D transforms and write all pixels to the file. """
@@ -370,7 +482,7 @@ class WaveformImageSimple(object):
         for x in range(self.image_width):
            self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
         self.image.save(filename)
-
+
 
 class SpectrogramImage(object):
     """ Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time).
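Note: the WaveformImageJoyContour class added above is the same class that the next diff removes from waveform_joydiv.py, so this part of the commit is a relocation into core.py rather than new behaviour. As a reading aid, here is a minimal, hypothetical sketch of how the relocated class might be driven directly; the image size, FFT size, ndiv/symetry values and the frame iterable are assumptions, and in practice the WaveformJoyDiv grapher left in waveform_joydiv.py does this wiring.

    # Hypothetical usage sketch only; the real wiring lives in the WaveformJoyDiv grapher.
    # Width/height/fft_size values and the `frame_blocks` iterable are assumptions.
    from timeside.grapher.core import WaveformImageJoyContour

    def render_contour(frame_blocks, nframes, samplerate, bg_color, color_scheme, out_png):
        image = WaveformImageJoyContour(572, 74, nframes, samplerate, 2048,
                                        bg_color, color_scheme, ndiv=4, symetry=True)
        for frames, eod in frame_blocks:   # (numpy block, end-of-data flag) pairs from a decoder
            image.process(frames, eod)     # stores one peak per pixel column; draws the stacked curves at eod
        image.save(out_png)                # nudges the middle scan line and writes the PNG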
diff --git a/timeside/grapher/waveform_joydiv.py b/timeside/grapher/waveform_joydiv.py
index 5bd65cf..575d007 100644
--- a/timeside/grapher/waveform_joydiv.py
+++ b/timeside/grapher/waveform_joydiv.py
@@ -23,116 +23,6 @@ from timeside.core import Processor, implements, interfacedoc, FixedSizeInputAda
 from timeside.api import IGrapher
 from timeside.grapher.core import *
 
-class WaveformImageJoyContour(WaveformImage):
-
-    def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme, ndiv=1, symetry=None):
-        WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme)
-        self.contour = numpy.zeros(self.image_width)
-        self.centroids = numpy.zeros(self.image_width)
-        self.ndiv = ndiv
-        self.x = numpy.r_[0:self.image_width-1:1]
-        self.dx1 = self.x[1]-self.x[0]
-        self.symetry = symetry
-
-    def get_peaks_contour(self, x, peaks, spectral_centroid=None):
-        self.contour[x] = numpy.max(peaks)
-        self.centroids[x] = spectral_centroid
-
-    def mean(self, samples):
-        return numpy.mean(samples)
-
-    def normalize(self, contour):
-        contour = contour-min(contour)
-        return contour/max(contour)
-
-    def draw_peaks_contour(self):
-        contour = self.contour.copy()
-
-        # Smoothing
-        contour = smooth(contour, window_len=16)
-
-        # Normalize
-        contour = self.normalize(contour)
-
-        # Scaling
-        #ratio = numpy.mean(contour)/numpy.sqrt(2)
-        ratio = 1
-        contour = self.normalize(numpy.expm1(contour/ratio))*(1-10**-6)
-
-        # Spline
-        #contour = cspline1d(contour)
-        #contour = cspline1d_eval(contour, self.x, dx=self.dx1, x0=self.x[0])
-
-        if self.symetry:
-            height = int(self.image_height/2)
-        else:
-            height = self.image_height
-
-        # Multicurve rotating
-        for i in range(0,self.ndiv):
-            self.previous_x, self.previous_y = None, None
-
-            #bright_color = 255
-            bright_color = int(255*(1-float(i)/(self.ndiv*2)))
-            bright_color = 255-bright_color+160
-#            line_color = self.color_lookup[int(self.centroids[j]*255.0)]
-            line_color = (bright_color,bright_color,bright_color)
-
-            # Linear
-            #contour = contour*(1.0-float(i)/self.ndiv)
-            #contour = contour*(1-float(i)/self.ndiv)
-
-            # Cosine
-            contour = contour*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi
-            #contour = self.contour*(1-float(i)*numpy.arccos(float(i)/self.ndiv)*2/numpy.pi/self.ndiv)
-
-            # Negative Sine
-            #contour = contour + ((1-contour)*2/numpy.pi*numpy.arcsin(float(i)/self.ndiv))
-
-            curve = (height-1)*contour
-#            curve = contour*(height-2)/2+height/2
-
-            for x in self.x:
-                x = int(x)
-                y = curve[x]
-                if not x == 0:
-                    if not self.symetry:
-                        self.draw.line([self.previous_x, self.previous_y, x, y], line_color)
-                        self.draw_anti_aliased_pixels(x, y, y, line_color)
-                    else:
-                        self.draw.line([self.previous_x, self.previous_y+height, x, y+height], line_color)
-                        self.draw_anti_aliased_pixels(x, y+height, y+height, line_color)
-                        self.draw.line([self.previous_x, -self.previous_y+height, x, -y+height], line_color)
-                        self.draw_anti_aliased_pixels(x, -y+height, -y+height, line_color)
-                else:
-                    if not self.symetry:
-                        self.draw.point((x, y), line_color)
-                    else:
-                        self.draw.point((x, y+height), line_color)
-                self.previous_x, self.previous_y = x, y
-
-    def process(self, frames, eod):
-        if len(frames) != 1:
-            buffer = frames[:,0].copy()
-            buffer.shape = (len(buffer),1)
-            for samples, end in self.pixels_adapter.process(buffer, eod):
-                if self.pixel_cursor < self.image_width:
-                    #(spectral_centroid, db_spectrum) = self.spectrum.process(buffer, True)
-                    peaks = self.peaks(samples)
-                    self.get_peaks_contour(self.pixel_cursor, peaks)
-                    self.pixel_cursor += 1
-        if eod:
-            self.draw_peaks_contour()
-
-    def save(self, filename):
-        """ Apply last 2D transforms and write all pixels to the file. """
-        # middle line (0 for none)
-        a = 1
-        for x in range(self.image_width):
-            self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
-#        self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
-        self.image.save(filename)
-
 
 class WaveformJoyDiv(Processor):
     implements(IGrapher)

diff --git a/timeside/tools/logger.py b/timeside/tools/logger.py
new file mode 100644
index 0000000..2f68c4d
--- /dev/null
+++ b/timeside/tools/logger.py
@@ -0,0 +1,22 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import logging
+
+class Logger:
+    """A logging object"""
+
+    def __init__(self, file):
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def write_info(self, message):
+        self.logger.info(message)
+
+    def write_error(self, message):
+        self.logger.error(message)
+

diff --git a/timeside/tools/waveform_batch.py b/timeside/tools/waveform_batch.py
index b46d6d5..91bc90c 100644
--- a/timeside/tools/waveform_batch.py
+++ b/timeside/tools/waveform_batch.py
@@ -54,8 +54,8 @@ class GrapherScheme:
 class Media2Waveform(object):
 
     def __init__(self, media_dir, img_dir):
-        self.root_dir = media_dir
-        self.img_dir = img_dir
+        self.root_dir = os.path.join(os.path.dirname(__file__), media_dir)
+        self.img_dir = os.path.join(os.path.dirname(__file__), img_dir)
         self.scheme = GrapherScheme()
         self.width = self.scheme.width
         self.height = self.scheme.height

diff --git a/timeside/tools/waveform_batch_awdio.py b/timeside/tools/waveform_batch_awdio.py
index 6cc97f9..37a6406 100644
--- a/timeside/tools/waveform_batch_awdio.py
+++ b/timeside/tools/waveform_batch_awdio.py
@@ -25,6 +25,7 @@ version = '0.2'
 import os
 import sys
 import timeside
+from logger import *
 
 
 class GrapherScheme:
@@ -53,23 +54,24 @@ class GrapherScheme:
         self.force = True
 
 
-class Media2Waveform(object):
+class Media2Waveform:
 
-    def __init__(self, media_dir, img_dir):
-        self.root_dir = media_dir
-        self.img_dir = img_dir
+    def __init__(self, media_dir, img_dir, log_file):
+        self.root_dir = os.path.join(os.path.dirname(__file__), media_dir)
+        self.img_dir = os.path.join(os.path.dirname(__file__), img_dir)
         self.scheme = GrapherScheme()
         self.width = self.scheme.width
         self.height = self.scheme.height
         self.bg_color = self.scheme.bg_color
         self.color_scheme = self.scheme.color_scheme
         self.force = self.scheme.force
-
+        self.logger = Logger(log_file)
+
         self.media_list = self.get_media_list()
         if not os.path.exists(self.img_dir):
             os.makedirs(self.img_dir)
         self.path_dict = self.get_path_dict()
-
+
     def get_media_list(self):
         media_list = []
         for root, dirs, files in os.walk(self.root_dir):
@@ -84,30 +86,34 @@ class Media2Waveform:
         for media in self.media_list:
             filename = media.split(os.sep)[-1]
             name, ext = os.path.splitext(filename)
-            path_dict[media] = self.img_dir + os.sep + filename.replace('.', '_') + '.png'
+            path_dict[media] = self.img_dir + os.sep + name + '.png'
         return path_dict
 
     def process(self):
-        for source, image in self.path_dict.iteritems():
+        for audio, image in self.path_dict.iteritems():
             if not os.path.exists(image) or self.force:
-                print 'Processing ', source
-                audio = os.path.join(os.path.dirname(__file__), source)
-                decoder = timeside.decoder.FileDecoder(audio)
-                analyzer = timeside.analyzer.Duration()
-                waveform = timeside.grapher.WaveformAwdio(width=self.width, height=self.height,
-                                                          bg_color=self.bg_color, color_scheme=self.color_scheme)
-                (decoder | analyzer | waveform).run()
-                duration = analyzer.result()
-                img_name = os.path.split(image)[1]
-                image = os.path.split(image)[0]+os.sep+os.path.splitext(img_name)[0] + '_' +\
-                    '_'.join([str(self.width), str(self.height), str(int(duration))])+os.path.splitext(img_name)[1]
-                waveform.graph.filename = image
-                print 'Rendering ', source, ' to ', waveform.graph.filename, '...'
-                print 'frames per pixel = ', waveform.graph.samples_per_pixel
+                mess = 'Processing ' + audio
+                self.logger.write_info(mess)
+                pipe = PipeWaveform()
+                waveform = pipe.process(audio, self.width, self.height, self.bg_color, self.color_scheme)
                 if os.path.exists(image):
                     os.remove(image)
+                mess = 'Rendering ' + image
+                self.logger.write_info(mess)
                 waveform.render(output=image)
-
+                mess = 'frames per pixel = ' + str(waveform.graph.samples_per_pixel)
+                self.logger.write_info(mess)
+
+
+class PipeWaveform:
+
+    def process(self, audio, width, height, bg_color, color_scheme):
+        decoder = timeside.decoder.FileDecoder(audio)
+        waveform = timeside.grapher.WaveformAwdio(width=width, height=height,
+                                                  bg_color=bg_color, color_scheme=color_scheme)
+        (decoder | waveform).run()
+        return waveform
+
 
 if __name__ == '__main__':
     if len(sys.argv) <= 2:
@@ -118,7 +124,8 @@ if __name__ == '__main__':
         See http://code.google.com/p/timeside/ for more information.
         """
     else:
-        media_dir = sys.argv[-2]
-        img_dir = sys.argv[-1]
-        m = Media2Waveform(media_dir, img_dir)
+        media_dir = sys.argv[-3]
+        img_dir = sys.argv[-2]
+        log_file = sys.argv[-1]
+        m = Media2Waveform(media_dir, img_dir, log_file)
         m.process()
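Usage note: with the changes above, waveform_batch_awdio.py now reads three positional arguments from sys.argv (media directory, image directory, log file) instead of two, and progress messages go to the new Logger instead of the console. A hypothetical invocation with placeholder paths, and an equivalent way to drive the same classes from Python (assuming the timeside/tools directory is importable), could look like this sketch:

    # Hypothetical command line, placeholder paths:
    #   python waveform_batch_awdio.py media/ img/ waveform.log
    #
    # Equivalent Python driving of the classes introduced in the diff above;
    # media/ and img/ are resolved relative to the script's own directory.
    from waveform_batch_awdio import Media2Waveform

    m = Media2Waveform('media', 'img', 'waveform.log')
    m.process()   # renders one PNG per audio file found under media/ and logs progress via Logger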