From 5019de412df1b436862bd5a8bb8cb766de858f34 Mon Sep 17 00:00:00 2001
From: yomguy
Date: Thu, 27 Sep 2012 00:03:33 +0200
Subject: [PATCH] fix adapter and all graphers

---
 tests/testinputadapter.py               |  4 ++--
 timeside/core.py                        | 18 +++++++++---------
 timeside/grapher/core.py                | 10 +++++-----
 timeside/grapher/spectrogram.py         |  9 +++++----
 timeside/grapher/waveform_contour_bk.py |  6 +++---
 timeside/grapher/waveform_contour_wh.py |  6 +++---
 timeside/grapher/waveform_simple.py     |  6 +++---
 7 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/tests/testinputadapter.py b/tests/testinputadapter.py
index 9dd400f..5879955 100644
--- a/tests/testinputadapter.py
+++ b/tests/testinputadapter.py
@@ -27,7 +27,7 @@ class TestFixedSizeInputAdapter(TestCase):
         "Test simple stream with two channels"
 
         adapter = FixedSizeInputAdapter(4, 2)
-        self.assertEquals(len(self.data), adapter.nframes(len(self.data)))
+        self.assertEquals(len(self.data), adapter.totalframes(len(self.data)))
 
         self.assertIOEquals(adapter, self.data[0:1], False, [])
         self.assertIOEquals(adapter, self.data[1:5], False, [self.data[0:4]], False)
@@ -43,7 +43,7 @@ class TestFixedSizeInputAdapter(TestCase):
         "Test automatic padding support"
 
         adapter = FixedSizeInputAdapter(4, 2, pad=True)
-        self.assertEquals(len(self.data) + 2, adapter.nframes(len(self.data)))
+        self.assertEquals(len(self.data) + 2, adapter.totalframes(len(self.data)))
 
         self.assertIOEquals(adapter, self.data[0:21], False,
                             [self.data[0:4], self.data[4:8], self.data[8:12], self.data[12:16], self.data[16:20]],
diff --git a/timeside/core.py b/timeside/core.py
index 3d02115..b183f0b 100644
--- a/timeside/core.py
+++ b/timeside/core.py
@@ -112,23 +112,23 @@ class FixedSizeInputAdapter(object):
         self.len = 0
         self.pad = pad
 
-    def blocksize(self, input_blocksize):
+    def totalframes(self, input_totalframes):
         """Return the total number of frames that this adapter will output according to the
-        input_blocksize argument"""
+        input_totalframes argument"""
 
-        blocksize = input_blocksize
+        totalframes = input_totalframes
         if self.pad:
-            mod = input_blocksize % self.buffer_size
+            mod = input_totalframes % self.buffer_size
             if mod:
-                blocksize += self.buffer_size - mod
+                totalframes += self.buffer_size - mod
 
-        return blocksize
+        return totalframes
 
-    def totalframes(self, input_totalframes):
+    def blocksize(self, blocksize):
         """Return the total number of frames that this adapter will output according to the
         input_blocksize argument"""
-        return input_totalframes
+        return blocksize
 
     def process(self, frames, eod):
         """Returns an iterator over tuples of the form (buffer, eod) where buffer is a
@@ -216,7 +216,7 @@ class ProcessPipe(object):
         # setup/reset processors and configure channels and samplerate throughout the pipe
         source.setup()
         #FIXME: wait for decoder mainloop
-        time.sleep(0.1)
+        time.sleep(0.2)
 
         last = source
         for item in items:
diff --git a/timeside/grapher/core.py b/timeside/grapher/core.py
index 8082f72..14811aa 100644
--- a/timeside/grapher/core.py
+++ b/timeside/grapher/core.py
@@ -135,10 +135,11 @@ class WaveformImage(object):
     Adds pixels iteratively thanks to the adapter providing fixed size frame buffers.
     Peaks are colored relative to the spectral centroids of each frame packet. """
 
-    def __init__(self, image_width, image_height, totalframes, samplerate, fft_size, bg_color, color_scheme):
+    def __init__(self, image_width, image_height, nframes, samplerate,
+                 fft_size, bg_color, color_scheme):
         self.image_width = image_width
         self.image_height = image_height
-        self.nframes = totalframes
+        self.nframes = nframes
         self.samplerate = samplerate
         self.fft_size = fft_size
         self.bg_color = bg_color
@@ -171,7 +172,6 @@
         """ Find the minimum and maximum peak of the samples.
         Returns that pair in the order they were found.
         So if min was found first, it returns (min, max) else the other way around. """
-
         max_index = numpy.argmax(samples)
         max_value = samples[max_index]
 
@@ -402,7 +402,7 @@ class WaveformImageSimple(object):
         self.samples_per_pixel = self.nframes / float(self.image_width)
         self.buffer_size = int(round(self.samples_per_pixel, 0))
         self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
-        self.pixels_adapter_nframes = self.pixels_adapter.nframes(self.nframes)
+        self.pixels_adapter_nframes = self.pixels_adapter.totalframes(self.nframes)
 
         self.image = Image.new("RGBA", (self.image_width, self.image_height))
         self.pixel = self.image.load()
@@ -503,7 +503,7 @@ class SpectrogramImage(object):
         self.samples_per_pixel = self.nframes / float(self.image_width)
         self.buffer_size = int(round(self.samples_per_pixel, 0))
         self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
-        self.pixels_adapter_nframes = self.pixels_adapter.nframes(self.nframes)
+        self.pixels_adapter_nframes = self.pixels_adapter.totalframes(self.nframes)
 
         self.lower = 100
         self.higher = 22050
diff --git a/timeside/grapher/spectrogram.py b/timeside/grapher/spectrogram.py
index 6ad66ff..8a01666 100644
--- a/timeside/grapher/spectrogram.py
+++ b/timeside/grapher/spectrogram.py
@@ -53,9 +53,10 @@ class Spectrogram(Processor):
         self.color_scheme = scheme
 
     @interfacedoc
-    def setup(self, channels=None, samplerate=None, nframes=None):
-        super(Spectrogram, self).setup(channels, samplerate, nframes)
-        self.graph = SpectrogramImage(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
+    def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+        super(Spectrogram, self).setup(channels, samplerate, blocksize, totalframes)
+        self.graph = SpectrogramImage(self.width, self.height, totalframes,
+                                      self.samplerate(), self.FFT_SIZE,
                                       bg_color=self.bg_color, color_scheme=self.color_scheme)
 
     @interfacedoc
@@ -68,7 +69,7 @@ class Spectrogram(Processor):
         if output:
             self.graph.save(output)
         return self.graph.image
-        
+
     def watermark(self, text, font=None, color=(255, 255, 255), opacity=.6, margin=(5,5)):
         self.graph.watermark(text, color=color, opacity=0.9, margin=margin)
 
diff --git a/timeside/grapher/waveform_contour_bk.py b/timeside/grapher/waveform_contour_bk.py
index 072511a..6c16707 100644
--- a/timeside/grapher/waveform_contour_bk.py
+++ b/timeside/grapher/waveform_contour_bk.py
@@ -55,9 +55,9 @@ class WaveformContourBlack(Processor):
         self.color_scheme = scheme
 
     @interfacedoc
-    def setup(self, channels=None, samplerate=None, nframes=None):
-        super(WaveformContourBlack, self).setup(channels, samplerate, nframes)
-        self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(),
+    def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+        super(WaveformContourBlack, self).setup(channels, samplerate, blocksize, totalframes)
+        self.graph = WaveformImageJoyContour(self.width, self.height, totalframes,
                                              self.samplerate(), self.FFT_SIZE,
                                              bg_color=self.bg_color,
                                              color_scheme=self.color_scheme,
diff --git a/timeside/grapher/waveform_contour_wh.py b/timeside/grapher/waveform_contour_wh.py
index a49ac6c..c4681b4 100644
--- a/timeside/grapher/waveform_contour_wh.py
+++ b/timeside/grapher/waveform_contour_wh.py
@@ -55,9 +55,9 @@ class WaveformContourWhite(Processor):
         self.color_scheme = scheme
 
     @interfacedoc
-    def setup(self, channels=None, samplerate=None, nframes=None):
-        super(WaveformContourWhite, self).setup(channels, samplerate, nframes)
-        self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(),
+    def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+        super(WaveformContourWhite, self).setup(channels, samplerate, blocksize, totalframes)
+        self.graph = WaveformImageJoyContour(self.width, self.height, totalframes,
                                              self.samplerate(), self.FFT_SIZE,
                                              bg_color=self.bg_color,
                                              color_scheme=self.color_scheme,
diff --git a/timeside/grapher/waveform_simple.py b/timeside/grapher/waveform_simple.py
index 2dd5e3e..9d3cef8 100644
--- a/timeside/grapher/waveform_simple.py
+++ b/timeside/grapher/waveform_simple.py
@@ -52,9 +52,9 @@ class WaveformAwdio(Processor):
         self.color_scheme = scheme
 
     @interfacedoc
-    def setup(self, channels=None, samplerate=None, nframes=None):
-        super(WaveformAwdio, self).setup(channels, samplerate, nframes)
-        self.graph = WaveformImageSimple(self.width, self.height, self.nframes(),
+    def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+        super(WaveformAwdio, self).setup(channels, samplerate, blocksize, totalframes)
+        self.graph = WaveformImageSimple(self.width, self.height, self.totalframes(),
                                          self.samplerate(), self.FFT_SIZE,
                                          bg_color=self.bg_color, color_scheme=self.color_scheme)
 
-- 
2.39.5
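
Note on the adapter API touched by this patch: FixedSizeInputAdapter.nframes() becomes totalframes(), blocksize() now simply passes the input block size through, and the graphers size their pixel adapters via totalframes(). The sketch below only illustrates that frame accounting as a standalone function with made-up example numbers; it is not the timeside.core implementation itself.

# Hypothetical standalone sketch of the padding arithmetic performed by the
# patched FixedSizeInputAdapter.totalframes(); names and numbers are
# illustrative only.
def adapter_totalframes(input_totalframes, buffer_size, pad=False):
    """Total number of frames the adapter will output for a given input length."""
    totalframes = input_totalframes
    if pad:
        mod = input_totalframes % buffer_size
        if mod:
            # the last partial buffer is padded up to a full buffer_size block
            totalframes += buffer_size - mod
    return totalframes

# With buffer_size=4: 22 input frames stay 22 without padding,
# and become 24 once the trailing 2-frame remainder is padded.
assert adapter_totalframes(22, 4, pad=False) == 22
assert adapter_totalframes(22, 4, pad=True) == 24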
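The grapher hunks all follow one pattern: setup() now takes (channels, samplerate, blocksize, totalframes), forwards all four to the parent Processor, and hands the total frame count to the image backend instead of calling self.nframes(). Below is a minimal, self-contained sketch of that pass-through pattern; BaseProcessor and ExampleImage are stand-ins invented for the example, not the real timeside classes, and the image size and stream parameters are arbitrary.

# Simplified stand-ins for timeside.core.Processor and the grapher image
# classes, used only to illustrate the new four-argument setup() chain.
class BaseProcessor(object):
    def setup(self, channels=None, samplerate=None, blocksize=None,
              totalframes=None):
        self.input_channels = channels
        self.input_samplerate = samplerate
        self.input_blocksize = blocksize
        self.input_totalframes = totalframes


class ExampleImage(object):
    def __init__(self, width, height, totalframes, samplerate):
        self.width, self.height = width, height
        self.totalframes = totalframes
        self.samplerate = samplerate


class ExampleGrapher(BaseProcessor):
    def setup(self, channels=None, samplerate=None, blocksize=None,
              totalframes=None):
        # forward every argument to the parent, then size the image
        # from the totalframes value received here
        super(ExampleGrapher, self).setup(channels, samplerate,
                                          blocksize, totalframes)
        self.graph = ExampleImage(320, 200, totalframes, samplerate)


grapher = ExampleGrapher()
grapher.setup(channels=2, samplerate=44100, blocksize=1024, totalframes=441000)
assert grapher.graph.totalframes == 441000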