"Test simple stream with two channels"
adapter = FixedSizeInputAdapter(4, 2)
- self.assertEquals(len(self.data), adapter.totalframes(len(self.data)))
+ self.assertEquals(len(self.data), adapter.blocksize(len(self.data)))
self.assertIOEquals(adapter, self.data[0:1], False, [])
self.assertIOEquals(adapter, self.data[1:5], False, [self.data[0:4]], False)
"Test automatic padding support"
adapter = FixedSizeInputAdapter(4, 2, pad=True)
- self.assertEquals(len(self.data) + 2, adapter.totalframes(len(self.data)))
+ self.assertEquals(len(self.data) + 2, adapter.blocksize(len(self.data)))
- self.assertIOEquals(adapter, self.data[0:21], False,
- [self.data[0:4], self.data[4:8], self.data[8:12], self.data[12:16], self.data[16:20]],
+ self.assertIOEquals(adapter, self.data[0:21], False,
+ [self.data[0:4], self.data[4:8], self.data[8:12], self.data[12:16], self.data[16:20]],
False)
self.assertIOEquals(adapter, self.data[21:22], True, [[
"Test a stream which contain a multiple number of buffers"
adapter = FixedSizeInputAdapter(4, 2)
- self.assertIOEquals(adapter, self.data[0:20], True,
- [self.data[0:4], self.data[4:8], self.data[8:12], self.data[12:16], self.data[16:20]],
+ self.assertIOEquals(adapter, self.data[0:20], True,
+ [self.data[0:4], self.data[4:8], self.data[8:12], self.data[12:16], self.data[16:20]],
True)
self.len = 0
self.pad = pad
- def totalframes(self, input_totalframes):
+ def blocksize(self, input_totalframes):
"""Return the total number of frames that this adapter will output according to the
input_totalframes argument"""
- totalframes = input_totalframes
+ blocksize = input_totalframes
if self.pad:
mod = input_totalframes % self.buffer_size
if mod:
- totalframes += self.buffer_size - mod
-
- return totalframes
-
- def blocksize(self, blocksize):
- """Return the total number of frames that this adapter will output according to the
- input_blocksize argument"""
+ blocksize += self.buffer_size - mod
return blocksize
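# Illustration only, not part of the patch: a minimal sketch of what blocksize()
# returns with and without padding, assuming the adapter is importable from
# timeside.core and using the 22-frame test data from the tests above.
from timeside.core import FixedSizeInputAdapter

padded = FixedSizeInputAdapter(4, 2, pad=True)
assert padded.blocksize(22) == 24      # 22 % 4 == 2, so 4 - 2 == 2 frames of padding are added
unpadded = FixedSizeInputAdapter(4, 2)
assert unpadded.blocksize(22) == 22    # pad=False: the frame count is returned unchanged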
# setup/reset processors and configure channels and samplerate throughout the pipe
source.setup()
#FIXME: wait for decoder mainloop
- time.sleep(0.2)
+ time.sleep(0.1)
last = source
for item in items:
    item.setup(channels = last.channels(),
               samplerate = last.samplerate(),
               blocksize = last.blocksize(),
               totalframes = last.totalframes())
+
last = item
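# Illustration only, not part of the patch: written out for a hypothetical
# two-stage pipe (decoder -> grapher), the loop above amounts to handing the
# decoder's stream parameters to the next processor in the chain.
grapher.setup(channels=decoder.channels(),
              samplerate=decoder.samplerate(),
              blocksize=decoder.blocksize(),
              totalframes=decoder.totalframes())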
# now stream audio data along the pipe
self.samples_per_pixel = self.nframes / float(self.image_width)
self.buffer_size = int(round(self.samples_per_pixel, 0))
self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
- self.pixels_adapter_nframes = self.pixels_adapter.totalframes(self.nframes)
+ self.pixels_adapter_nframes = self.pixels_adapter.blocksize(self.nframes)
self.lower = 800
self.higher = 12000
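# Worked example, illustration only (the figures are hypothetical): 15 s of
# audio at 44100 Hz rendered into a 1024-pixel-wide image.
nframes = 661500
image_width = 1024
samples_per_pixel = nframes / float(image_width)    # ~646.0 frames per pixel column
buffer_size = int(round(samples_per_pixel, 0))      # 646
# With pad=False, blocksize() leaves the total unchanged, so
# pixels_adapter_nframes == nframes == 661500 here.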
self.samples_per_pixel = self.nframes / float(self.image_width)
self.buffer_size = int(round(self.samples_per_pixel, 0))
self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
- self.pixels_adapter_nframes = self.pixels_adapter.totalframes(self.nframes)
+ self.pixels_adapter_nframes = self.pixels_adapter.blocksize(self.nframes)
+ print self.pixels_adapter_nframes
self.image = Image.new("RGBA", (self.image_width, self.image_height))
self.pixel = self.image.load()
self.samples_per_pixel = self.nframes / float(self.image_width)
self.buffer_size = int(round(self.samples_per_pixel, 0))
self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
- self.pixels_adapter_nframes = self.pixels_adapter.totalframes(self.nframes)
+ self.pixels_adapter_nframes = self.pixels_adapter.blocksize(self.nframes)
self.lower = 100
self.higher = 22050
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None, totalframes=None):
super(Waveform, self).setup(channels, samplerate, blocksize, totalframes)
- self.graph = WaveformImage(self.width, self.height, self.totalframes(),
+ self.graph = WaveformImage(self.width, self.height, totalframes,
self.samplerate(), self.FFT_SIZE,
bg_color=self.bg_color,
color_scheme=self.color_scheme)
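# Usage sketch, illustration only: setup() is normally called by the process
# pipe with the upstream stream parameters; the decoder object and the Waveform
# constructor arguments shown here are assumptions, not taken from this patch.
waveform = Waveform(width=1024, height=256)
waveform.setup(channels=decoder.channels(),
               samplerate=decoder.samplerate(),
               blocksize=decoder.blocksize(),
               totalframes=decoder.totalframes())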