Answer 'YES' to all questions, then::
$ sudo aptitude install -t lenny-backports vim subversion python python-setuptools python-xml python-mutagen \
    python-imaging python-numpy python-gst0.10 gstreamer0.10-plugins-base \
    gstreamer0.10-fluendo-mp3 gstreamer0.10-plugins-good gstreamer0.10-plugins-bad
2. Install TimeSide
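The exact commands for this step are elided here. Assuming the usual subversion checkout followed by a setuptools install (which is why subversion and python-setuptools appear in the package list above), the flow would look roughly like this, with <timeside-svn-url> standing in for the project's repository URL:

$ svn co <timeside-svn-url> timeside
$ cd timeside
$ sudo python setup.py install
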
self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
self.image.save(filename)
+    def release(self):
+        # Drop the heavyweight references so they can be garbage-collected
+        # between runs.
+        self.pixel = 0
+        self.image = 0
+        self.draw = 0
+        self.spectrum = 0
class WaveformImageJoyContour(WaveformImage):
self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
#self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)
self.image.save(filename)


class WaveformImageSimple(WaveformImage):
""" Builds a PIL image representing a waveform of the audio stream.
Adds pixels iteratively thanks to the adapter providing fixed size frame buffers.
Peaks are colored relative to the spectral centroids of each frame packet. """
def __init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme):
- WaveformImage.__init__(self, image_width, image_height, nframes, samplerate, fft_size, bg_color, color_scheme)
+ self.image_width = image_width
+ self.image_height = image_height
+ self.nframes = nframes
+ self.samplerate = samplerate
+ self.fft_size = fft_size
+ self.bg_color = bg_color
+ self.color_scheme = color_scheme
+
if isinstance(color_scheme, dict):
colors = color_scheme['waveform']
else:
colors = default_color_schemes[color_scheme]['waveform']
self.line_color = colors[0]
+
+ self.samples_per_pixel = self.nframes / float(self.image_width)
+ self.buffer_size = int(round(self.samples_per_pixel, 0))
+ self.pixels_adapter = FixedSizeInputAdapter(self.buffer_size, 1, pad=False)
+ self.pixels_adapter_nframes = self.pixels_adapter.nframes(self.nframes)
+
self.image = Image.new("RGBA", (self.image_width, self.image_height))
self.pixel = self.image.load()
self.draw = ImageDraw.Draw(self.image)
-
+ self.previous_x, self.previous_y = None, None
+ self.frame_cursor = 0
+ self.pixel_cursor = 0
+
def normalize(self, contour):
contour = contour-min(contour)
return contour/max(contour)
def process(self, frames, eod):
if len(frames) != 1:
-            buffer = frames[:,0].copy()
+            buffer = frames[:,0]  # a view, not a copy: saves one allocation per packet
buffer.shape = (len(buffer),1)
for samples, end in self.pixels_adapter.process(buffer, eod):
if self.pixel_cursor < self.image_width-1:
self.draw_peaks(self.pixel_cursor, self.peaks(samples))
self.pixel_cursor += 1
+                if end:
+                    # drop the last references once the stream is exhausted
+                    samples = 0
+                    buffer = 0
+                    break
if self.pixel_cursor == self.image_width-1:
self.draw_peaks(self.pixel_cursor, (0, 0))
self.pixel_cursor += 1
""" Apply last 2D transforms and write all pixels to the file. """
# middle line (0 for none)
- a = 0
+ a = 1
for x in range(self.image_width):
self.pixel[x, self.image_height/2] = tuple(map(lambda p: p+a, self.pixel[x, self.image_height/2]))
self.image.save(filename)

+    def release(self):
+        # Break references (including the adapter's bound method) so the
+        # large buffers can be garbage-collected between runs.
+        self.pixels_adapter.process = 0
+        self.pixel = 0
+        self.image = 0
+        self.draw = 0
+        self.spectrum = 0

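To see the pixel math at work: a three-minute mono stream at 44100 Hz carries 180 * 44100 = 7938000 frames; rendered 1800 pixels wide, samples_per_pixel = 7938000 / 1800.0 = 4410.0, so buffer_size = 4410 and the adapter delivers exactly one block per pixel column. The sketch below illustrates the reblocking idea in isolation; it is not TimeSide's actual FixedSizeInputAdapter (which also handles channel counts and optional padding), only the pad=False behaviour that process() relies on:

    import numpy

    def reblock(packets, buffer_size):
        """Yield (block, end) pairs of exactly buffer_size samples each;
        as with pad=False, a trailing partial block is silently dropped."""
        pending = numpy.zeros((0, 1))
        for frames, eod in packets:
            pending = numpy.concatenate((pending, frames))
            while len(pending) >= buffer_size:
                block, pending = pending[:buffer_size], pending[buffer_size:]
                yield block, eod and len(pending) < buffer_size
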
class SpectrogramImage(object):
""" Builds a PIL image representing a spectrogram of the audio stream (level vs. frequency vs. time).
self.graph = WaveformImageSimple(self.width, self.height, self.nframes(), self.samplerate(), self.FFT_SIZE,
bg_color=self.bg_color, color_scheme=self.color_scheme)
- @interfacedoc
- def release(self):
- pass
-
@interfacedoc
def process(self, frames, eod=False):
self.graph.process(frames, eod)
return frames, eod
+#
+# def release(self):
+# self.graph.release()
@interfacedoc
def render(self, output):
if output:
self.graph.save(output)
- return self.graph.image
+# return self.graph.image
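Note that process() is a pure pass-through: the grapher reads the frames to draw pixels but returns them untouched, which is what lets it be chained with other processors as (decoder | grapher). A toy version of that contract, independent of TimeSide's actual Processor base class (on_frames() is a hypothetical drawing hook):

    class PassThroughStage(object):
        """Toy pipeline stage: observe the frames, forward them unchanged."""
        def process(self, frames, eod=False):
            self.on_frames(frames)   # side effect only, e.g. draw some pixels
            return frames, eod       # downstream stages see identical data
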
self.force = False
# Nb of threads
- self.threads = 2
+        # FIXME: memory leak with more than one thread!
+ self.threads = 1
-class Media2Waveform:
+class Media2Waveform(object):
def __init__(self, media_dir, img_dir, log_file):
self.root_dir = os.path.join(os.path.dirname(__file__), media_dir)
self.threads = self.scheme.threads
self.logger = Logger(log_file)
self.counter = 0
- self.workers = []
self.media_list = self.get_media_list()
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
for root, dirs, files in os.walk(self.root_dir):
if root:
for file in files:
- ext = file.split('.')[-1]
- media_list.append(root+os.sep+file)
+                if file[0] != '.':  # skip hidden files
+                    ext = file.split('.')[-1]
+                    media_list.append(root+os.sep+file)
return media_list
def get_path_dict(self):
path_dict[media] = image
return path_dict
- def processing_workers(self):
- processing = 0
- for worker in self.workers:
- if worker.processing:
- processing += 1
- print 'procs : ', processing
- return processing
-
def process(self):
- q = Queue.Queue(1)
- p = Producer(q)
- p.start()
-
- for media, image in self.path_dict.iteritems():
- self.workers.append(Worker(media, image, self.width, self.height,
- self.bg_color, self.color_scheme, q, self.logger))
- print self.workers
- for worker in self.workers:
- if self.counter != 0:
- while not self.processing_workers() % self.threads:
- time.sleep(10)
+        q = Queue.Queue()
+        for i in range(0, self.threads):
+            worker = Thread(target=Worker, args=(self.width, self.height, self.bg_color, self.color_scheme, q, self.logger))
+            worker.setDaemon(True)  # daemon: the pool dies with the main thread
            worker.start()
- time.sleep(1)
- print self.counter
- self.counter += 1
-
-class Worker(Thread):
-
- def __init__(self, media, image, width, height, bg_color, color_scheme, q, logger):
- Thread.__init__(self)
- self.media = media
- self.image = image
- self.q = q
- self.logger = logger
- self.width = width
- self.height = height
- self.bg_color = bg_color
- self.color_scheme = color_scheme
- self.decoder = timeside.decoder.FileDecoder(self.media)
- self.waveform = timeside.grapher.WaveformAwdio(width=self.width, height=self.height,
- bg_color=self.bg_color, color_scheme=self.color_scheme)
- self.processing = 0
-
- def run(self):
- self.q.get(1)
- mess = 'Processing ' + self.media
+ mess = str(self.threads) + ' thread(s) started'
self.logger.write_info(mess)
- self.processing = 1
- (self.decoder | self.waveform).run()
- self.q.task_done()
+
+ for media, image in self.path_dict.iteritems():
+ q.put((media, image))
+ print media, image
- self.q.get(1)
- if os.path.exists(self.image):
- os.remove(self.image)
- mess = 'Rendering ' + self.image
- self.logger.write_info(mess)
- self.waveform.render(output=self.image)
- mess = 'frames per pixel = ' + str(self.waveform.graph.samples_per_pixel)
- self.logger.write_info(mess)
- self.waveform.release()
- self.decoder.release()
- self.processing = 0
- self.q.task_done()
-
-
-class Producer(Thread):
- """a Producer master thread"""
-
- def __init__(self, q):
- Thread.__init__(self)
- self.q = q
-
- def run(self):
- i=0
- q = self.q
- while True:
- q.put(i,1)
- i+=1
-
-
+ q.join()
+
+
+def Worker(width, height, bg_color, color_scheme, q, logger):
+    # Consumer loop: each daemon thread keeps pulling (media, image) pairs
+    # from the queue until the main thread's q.join() returns and exits.
+    while True:
+        media, image = q.get()
+        mess = 'Processing ' + media
+        logger.write_info(mess)
+        decoder = timeside.decoder.FileDecoder(media)
+        grapher = timeside.grapher.WaveformAwdio(width=width, height=height, bg_color=bg_color, color_scheme=color_scheme)
+        (decoder | grapher).run()
+        mess = 'Rendering ' + image
+        logger.write_info(mess)
+        grapher.render(output=image)
+        mess = 'Frames / Pixel = ' + str(grapher.graph.samples_per_pixel)
+        logger.write_info(mess)
+        grapher.release()
+        q.task_done()
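The rewrite swaps the one-thread-per-file Worker class and its Producer ticker for a fixed pool of daemon consumers fed through an unbounded Queue; q.join() then blocks until every queued pair has been answered by a task_done(). The same pattern in isolation, with handle() as a hypothetical stand-in for the decode/render pipeline:

    import Queue
    from threading import Thread

    def consumer(q, handle):
        while True:
            item = q.get()       # blocks until work is available
            handle(item)
            q.task_done()        # must answer every get(), or join() never returns

    def run_pool(items, handle, n_threads=1):
        q = Queue.Queue()
        for i in range(n_threads):
            t = Thread(target=consumer, args=(q, handle))
            t.setDaemon(True)    # the pool dies with the main thread after join()
            t.start()
        for item in items:
            q.put(item)
        q.join()                 # wait until every item has been processed
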
+
if __name__ == '__main__':
if len(sys.argv) <= 2:
print """