"Test simple stream with two channels"
adapter = FixedSizeInputAdapter(4, 2)
- self.assertEquals(len(self.data), adapter.nframes(len(self.data)))
+ self.assertEquals(len(self.data), adapter.totalframes(len(self.data)))
self.assertIOEquals(adapter, self.data[0:1], False, [])
self.assertIOEquals(adapter, self.data[1:5], False, [self.data[0:4]], False)
"Test automatic padding support"
adapter = FixedSizeInputAdapter(4, 2, pad=True)
- self.assertEquals(len(self.data) + 2, adapter.nframes(len(self.data)))
+ self.assertEquals(len(self.data) + 2, adapter.totalframes(len(self.data)))
self.assertIOEquals(adapter, self.data[0:21], False,
[self.data[0:4], self.data[4:8], self.data[8:12], self.data[12:16], self.data[16:20]],
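The expected value of len(self.data) + 2 follows from the adapter's padding rule; a minimal sketch of that arithmetic, assuming the fixture holds 22 frames as the slices above suggest:

    buffer_size = 4
    input_totalframes = 22
    mod = input_totalframes % buffer_size    # 22 % 4 == 2
    padded = input_totalframes
    if mod:
        padded += buffer_size - mod          # round up to the next multiple of 4
    assert padded == input_totalframes + 2   # hence len(self.data) + 2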
self.len = 0
self.pad = pad
- def blocksize(self, input_blocksize):
+ def totalframes(self, input_totalframes):
"""Return the total number of frames that this adapter will output according to the
- input_blocksize argument"""
+ input_totalframes argument"""
- blocksize = input_blocksize
+ totalframes = input_totalframes
if self.pad:
- mod = input_blocksize % self.buffer_size
+ mod = input_totalframes % self.buffer_size
if mod:
- blocksize += self.buffer_size - mod
+ totalframes += self.buffer_size - mod
- return blocksize
+ return totalframes
- def totalframes(self, input_totalframes):
+ def blocksize(self, blocksize):
"""Return the total number of frames that this adapter will output according to the
input_blocksize argument"""
- return input_totalframes
+ return blocksize
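Taken together, the renamed accessors behave as follows; a short usage sketch based only on this hunk (the constructor arguments mirror the tests above):

    adapter = FixedSizeInputAdapter(4, 2, pad=True)
    adapter.totalframes(22)   # 24: rounded up to a multiple of the 4-frame buffer
    adapter.blocksize(1024)   # 1024: returned unchanged by this adapter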
def process(self, frames, eod):
"""Returns an iterator over tuples of the form (buffer, eod) where buffer is a
# setup/reset processors and configure channels and samplerate throughout the pipe
source.setup()
#FIXME: wait for decoder mainloop
- time.sleep(0.1)
+ time.sleep(0.2)
last = source
for item in items:
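The loop body is cut off here; a hedged sketch of how it presumably continues, assuming each processor exposes channels(), samplerate(), blocksize() and totalframes() accessors matching the setup() signatures changed elsewhere in this patch:

        item.setup(channels=last.channels(), samplerate=last.samplerate(),
                   blocksize=last.blocksize(), totalframes=last.totalframes())
        last = item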
Adds pixels iteratively thanks to the adapter providing fixed size frame buffers.
Peaks are colored relative to the spectral centroids of each frame packet. """
- def __init__(self, image_width, image_height, totalframes, samplerate, fft_size, bg_color, color_scheme):
+ def __init__(self, image_width, image_height, nframes, samplerate,
+ fft_size, bg_color, color_scheme):
self.image_width = image_width
self.image_height = image_height
- self.nframes = totalframes
+ self.nframes = nframes
self.samplerate = samplerate
self.fft_size = fft_size
self.bg_color = bg_color
""" Find the minimum and maximum peak of the samples.
Returns that pair in the order they were found.
So if min was found first, it returns (min, max) else the other way around. """
-
max_index = numpy.argmax(samples)
max_value = samples[max_index]
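A hedged sketch of how the rest of the routine could satisfy the docstring, locating the minimum the same way and returning the extrema in the order they occur:

    min_index = numpy.argmin(samples)
    min_value = samples[min_index]
    if min_index < max_index:
        return (min_value, max_value)   # minimum encountered first
    return (max_value, min_value)       # maximum encountered first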
self.samples_per_pixel = self.nframes / float(self.image_width)
self.buffer_size = int(round(self.samples_per_pixel, 0))
self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
- self.pixels_adapter_nframes = self.pixels_adapter.nframes(self.nframes)
+ self.pixels_adapter_nframes = self.pixels_adapter.totalframes(self.nframes)
self.image = Image.new("RGBA", (self.image_width, self.image_height))
self.pixel = self.image.load()
self.samples_per_pixel = self.nframes / float(self.image_width)
self.buffer_size = int(round(self.samples_per_pixel, 0))
self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
- self.pixels_adapter_nframes = self.pixels_adapter.nframes(self.nframes)
+ self.pixels_adapter_nframes = self.pixels_adapter.totalframes(self.nframes)
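Illustrative numbers (assumed, not taken from the source) showing how this wiring maps frames to pixel columns, roughly one fixed-size block per column:

    nframes = 1000000
    image_width = 500
    samples_per_pixel = nframes / float(image_width)   # 2000.0 frames per column
    buffer_size = int(round(samples_per_pixel, 0))     # 2000
    adapter = FixedSizeInputAdapter(buffer_size, 1, pad=False)
    adapter.totalframes(nframes) // buffer_size        # 500 blocks -> 500 columns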
self.lower = 100
self.higher = 22050
self.color_scheme = scheme
@interfacedoc
- def setup(self, channels=None, samplerate=None, nframes=None):
- super(Spectrogram, self).setup(channels, samplerate, nframes)
- self.graph = SpectrogramImage(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
+ def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+ super(Spectrogram, self).setup(channels, samplerate, blocksize, totalframes)
+ self.graph = SpectrogramImage(self.width, self.height, totalframes,
+ self.samplerate(), self.FFT_SIZE,
bg_color=self.bg_color, color_scheme=self.color_scheme)
@interfacedoc
if output:
self.graph.save(output)
return self.graph.image
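A hedged end-to-end sketch of how such a grapher is typically driven; the decoder, the | pipe operator and run() are assumptions about the surrounding TimeSide API that this excerpt does not show, and the file names are placeholders:

    import timeside
    decoder = timeside.decoder.FileDecoder('sound.wav')
    spectrogram = timeside.grapher.Spectrogram(width=1024, height=256)
    (decoder | spectrogram).run()
    spectrogram.render(output='spectrogram.png')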
-
+
def watermark(self, text, font=None, color=(255, 255, 255), opacity=.6, margin=(5,5)):
- self.graph.watermark(text, color=color, opacity=0.9, margin=margin)
+ self.graph.watermark(text, color=color, opacity=opacity, margin=margin)
self.color_scheme = scheme
@interfacedoc
- def setup(self, channels=None, samplerate=None, nframes=None):
- super(WaveformContourBlack, self).setup(channels, samplerate, nframes)
- self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(),
+ def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+ super(WaveformContourBlack, self).setup(channels, samplerate, blocksize, totalframes)
+ self.graph = WaveformImageJoyContour(self.width, self.height, totalframes,
self.samplerate(), self.FFT_SIZE,
bg_color=self.bg_color,
color_scheme=self.color_scheme,
self.color_scheme = scheme
@interfacedoc
- def setup(self, channels=None, samplerate=None, nframes=None):
- super(WaveformContourWhite, self).setup(channels, samplerate, nframes)
- self.graph = WaveformImageJoyContour(self.width, self.height, self.nframes(),
+ def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+ super(WaveformContourWhite, self).setup(channels, samplerate, blocksize, totalframes)
+ self.graph = WaveformImageJoyContour(self.width, self.height, totalframes,
self.samplerate(), self.FFT_SIZE,
bg_color=self.bg_color,
color_scheme=self.color_scheme,
self.color_scheme = scheme
@interfacedoc
- def setup(self, channels=None, samplerate=None, nframes=None):
- super(WaveformAwdio, self).setup(channels, samplerate, nframes)
- self.graph = WaveformImageSimple(self.width, self.height, self.nframes(),
+ def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
+ super(WaveformAwdio, self).setup(channels, samplerate, blocksize, totalframes)
+ self.graph = WaveformImageSimple(self.width, self.height, self.totalframes(),
self.samplerate(), self.FFT_SIZE,
bg_color=self.bg_color,
color_scheme=self.color_scheme)