From: yomguy
Date: Fri, 9 Oct 2009 13:04:24 +0000 (+0000)
Subject: grapher testing
X-Git-Tag: 0.3.2~237
X-Git-Url: https://git.parisson.com/?a=commitdiff_plain;h=609d6ee81ef4fa2b00fbf57dbf6903059adbaf2c;p=timeside.git

grapher testing
---

diff --git a/analyze/core.py b/analyze/core.py
index 9c2a2ce..fdbcb41 100644
--- a/analyze/core.py
+++ b/analyze/core.py
@@ -27,7 +27,7 @@ import numpy
 import scikits.audiolab as audiolab
 
 class AudioProcessor(Component):
-    
+
     def __init__(self):
         self.fft_size = 2048
         self.window_function = numpy.ones
@@ -40,7 +40,7 @@ class AudioProcessor(Component):
         self.clip = lambda val, low, high: min(high, max(low, val))
 
     def pre_process(self, media_item):
-        wav_file = media_item.file.path
+        wav_file = media_item
         self.audio_file = audiolab.sndfile(wav_file, 'read')
         self.frames = self.audio_file.get_nframes()
         self.samplerate = self.audio_file.get_samplerate()
@@ -51,7 +51,7 @@ class AudioProcessor(Component):
     def get_samples(self):
         samples = self.audio_file.read_frames(self.frames)
         return samples
-    
+
     def get_mono_samples(self):
         # convert to mono by selecting left channel only
         samples = self.get_samples()
@@ -63,11 +63,11 @@ class AudioProcessor(Component):
     def read(self, start, size, resize_if_less=False):
         """ read size samples starting at start, if resize_if_less is True and less than size
         samples are read, resize the array to size and fill with zeros """
-        
+
         # number of zeros to add to start and end of the buffer
         add_to_start = 0
         add_to_end = 0
-        
+
         if start < 0:
             # the first FFT window starts centered around zero
             if size + start <= 0:
@@ -86,12 +86,12 @@ class AudioProcessor(Component):
                     to_read = self.frames
         else:
             self.audio_file.seek(start)
-            
+
             to_read = size
             if start + to_read >= self.frames:
                 to_read = self.frames - start
                 add_to_end = size - to_read
-        
+
         try:
             samples = self.audio_file.read_frames(to_read)
         except IOError:
@@ -108,41 +108,41 @@ class AudioProcessor(Component):
         if resize_if_less and (add_to_start > 0 or add_to_end > 0):
             if add_to_start > 0:
                 samples = numpy.concatenate((numpy.zeros(add_to_start), samples), axis=1)
-                
+
             if add_to_end > 0:
                 samples = numpy.resize(samples, size)
                 samples[size - add_to_end:] = 0
-        
+
         return samples
 
 
     def spectral_centroid(self, seek_point, spec_range=120.0):
         """ starting at seek_point read fft_size samples, and calculate the spectral centroid """
-        
+
         samples = self.read(seek_point - self.fft_size/2, self.fft_size, True)
         samples *= self.window
         fft = numpy.fft.fft(samples)
         spectrum = numpy.abs(fft[:fft.shape[0] / 2 + 1]) / float(self.fft_size) # normalized abs(FFT) between 0 and 1
         length = numpy.float64(spectrum.shape[0])
-        
+
         # scale the db spectrum from [- spec_range db ... 0 db] > [0..1]
         db_spectrum = ((20*(numpy.log10(spectrum + 1e-30))).clip(-spec_range, 0.0) + spec_range)/spec_range
-        
+
         energy = spectrum.sum()
         spectral_centroid = 0
-        
+
         if energy > 1e-20:
             # calculate the spectral centroid
-            
+
             if self.spectrum_range == None:
                 self.spectrum_range = numpy.arange(length)
-            
+
             spectral_centroid = (spectrum * self.spectrum_range).sum() / (energy * (length - 1)) * self.samplerate * 0.5
-            
+
             # clip > log10 > scale between 0 and 1
             spectral_centroid = (math.log10(self.clip(spectral_centroid, self.lower, self.higher)) - self.lower_log) / (self.higher_log - self.lower_log)
-        
+
         return (spectral_centroid, db_spectrum)
 
 
@@ -150,42 +150,42 @@ class AudioProcessor(Component):
         """ read all samples between start_seek and end_seek, then find the minimum and maximum peak
         in that range. Returns that pair in the order they were found. So if min was found first,
         it returns (min, max) else the other way around. """
-        
+
         # larger blocksizes are faster but take more mem...
         # Aha, Watson, a clue, a tradeof!
         block_size = 4096
-        
+
         max_index = -1
         max_value = -1
         min_index = -1
         min_value = 1
-        
+
         if end_seek > self.frames:
             end_seek = self.frames
-        
+
         if block_size > end_seek - start_seek:
             block_size = end_seek - start_seek
-        
+
         if block_size <= 1:
             samples = self.read(start_seek, 1)
             return samples[0], samples[0]
         elif block_size == 2:
             samples = self.read(start_seek, True)
             return samples[0], samples[1]
-        
+
         for i in range(start_seek, end_seek, block_size):
             samples = self.read(i, block_size)
-            
+
             local_max_index = numpy.argmax(samples)
             local_max_value = samples[local_max_index]
-            
+
             if local_max_value > max_value:
                 max_value = local_max_value
                 max_index = local_max_index
-            
+
             local_min_index = numpy.argmin(samples)
             local_min_value = samples[local_min_index]
-            
+
             if local_min_value < min_value:
                 min_value = local_min_value
                 min_index = local_min_index
@@ -196,4 +196,4 @@ class AudioProcessor(Component):
             return (max_value, min_value)
 
 
-        
+
diff --git a/graph/spectrogram_audiolab.py b/graph/spectrogram_audiolab.py
index d561980..9687a25 100644
--- a/graph/spectrogram_audiolab.py
+++ b/graph/spectrogram_audiolab.py
@@ -45,7 +45,7 @@ class SpectrogramGrapherAudiolab(Component):
     def render(self, media_item, width=None, height=None, options=None):
         """Generator that streams the spectrogram as a PNG image with a python method"""
 
-        wav_file = media_item.file.path
+        wav_file = media_item
         pngFile = NamedTemporaryFile(suffix='.png')
 
         if not width == None:
diff --git a/graph/waveform_audiolab.py b/graph/waveform_audiolab.py
index f9b9201..8a7dd0e 100644
--- a/graph/waveform_audiolab.py
+++ b/graph/waveform_audiolab.py
@@ -45,7 +45,7 @@ class WaveFormGrapherAudiolab(Component):
    def render(self, media_item, width=None, height=None, options=None):
         """Generator that streams the waveform as a PNG image with a python method"""
 
-        wav_file = media_item.file.path
+        wav_file = media_item
         pngFile = NamedTemporaryFile(suffix='.png')
 
         if not width == None:
diff --git a/tests/samples/sweep.wav b/tests/samples/sweep.wav
new file mode 100644
index 0000000..9c2febe
Binary files /dev/null and b/tests/samples/sweep.wav differ
diff --git a/tests/test.py b/tests/test.py
index cc1f6b4..d069edd 100755
--- a/tests/test.py
+++ b/tests/test.py
@@ -4,10 +4,11 @@ import timeside
 
 from timeside.core import Component, ExtensionPoint, ComponentManager
 
+
 class TestAnalyzers(Component):
     analyzers = ExtensionPoint(timeside.analyze.IAnalyzer)
 
-    def run(self):
+    def list(self):
         analyzers = []
         for analyzer in self.analyzers:
             analyzers.append({'name':analyzer.name(),
@@ -16,10 +17,17 @@ class TestAnalyzers(Component):
                               })
         print analyzers
 
+    def run(self, media):
+        print '\n=== Analyzer testing ===\n'
+        for analyzer in self.analyzers:
+            id = analyzer.id()
+            value = analyzer.render(media)
+            print id + ' = ' + str(value) + ' ' + analyzer.unit()
+
 class TestDecoders(Component):
     decoders = ExtensionPoint(timeside.decode.IDecoder)
 
-    def run(self):
+    def list(self):
         decoders = []
         for decoder in self.decoders:
             decoders.append({'format':decoder.format(),
@@ -31,7 +39,7 @@ class TestEncoders(Component):
     encoders = ExtensionPoint(timeside.encode.IEncoder)
 
-    def run(self):
+    def list(self):
         encoders = []
         for encoder in self.encoders:
             encoders.append({'format':encoder.format(),
@@ -42,7 +50,7 @@ class TestGraphers(Component):
     graphers = ExtensionPoint(timeside.graph.IGrapher)
 
-    def run(self):
+    def list(self):
         graphers = []
         for grapher in self.graphers:
             graphers.append({'id':grapher.id(),
@@ -50,14 +58,32 @@ class TestGraphers(Component):
                              })
         print graphers
 
 
+    def run(self, media):
+        print '\n=== Grapher testing ===\n'
+        for grapher in self.graphers:
+            id = grapher.id()
+            image = grapher.render(media)
+            file_path = 'results/'+id+'.png'
+            file = open(file_path, 'w')
+            for chunk in image:
+                file.write(chunk)
+            print 'Image exported to :' + file_path
+            file.close()
+
 if __name__ == '__main__':
+    sample = 'samples/sweep.wav'
     comp_mgr = ComponentManager()
     a = TestAnalyzers(comp_mgr)
     d = TestDecoders(comp_mgr)
     e = TestEncoders(comp_mgr)
     g = TestGraphers(comp_mgr)
-    a.run()
-    d.run()
-    e.run()
-    g.run()
+    a.list()
+    d.list()
+    e.list()
+    g.list()
+    a.run(sample)
+    #d.run()
+    #e.run()
+    g.run(sample)
+
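
Below is a minimal, hypothetical sketch (not part of the commit above) of how the new grapher test is meant to be exercised, following the same ExtensionPoint pattern as tests/test.py. It assumes the timeside package and scikits.audiolab are importable, that tests/samples/sweep.wav is present, and that a results/ directory exists next to the script; the GrapherSmoke class name is illustrative only.

    import os
    import timeside
    from timeside.core import Component, ExtensionPoint, ComponentManager

    class GrapherSmoke(Component):
        # collect every registered grapher, exactly as TestGraphers does
        graphers = ExtensionPoint(timeside.graph.IGrapher)

        def run(self, media):
            for grapher in self.graphers:
                # after this commit, render() is fed a plain path to a WAV file
                image = grapher.render(media)
                path = os.path.join('results', grapher.id() + '.png')
                f = open(path, 'wb')  # binary mode, since the PNG chunks are bytes
                for chunk in image:
                    f.write(chunk)
                f.close()
                print 'Image exported to: ' + path

    if __name__ == '__main__':
        GrapherSmoke(ComponentManager()).run('samples/sweep.wav')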