--- /dev/null
+
+import gobject
+gobject.threads_init()
+import pygst
+pygst.require("0.10")
+import gst
+from threading import Thread
+import sys
+import liblo
+
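+# OSCController runs a liblo OSC server on a background thread so that
+# incoming messages are handled while the GLib main loop drives playback.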
+class OSCController(Thread):
+
+ def __init__(self, port):
+ Thread.__init__(self)
+ self.port = port
+ try:
+ self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+            raise
+
+ def run(self):
+ while True:
+ self.server.recv(100)
+
+
+class AudioPlayer(Thread):
+
+ def __init__(self, uri):
+ Thread.__init__(self)
+ self.uri = uri
+ self.controller = OSCController(12345)
+ self.controller.server.add_method('/play', 'i', self.play_stop_cb)
+ self.controller.start()
+
+ self.mainloop = gobject.MainLoop()
+ self.player = gst.element_factory_make("playbin", "player")
+ self.player.set_property('uri', self.uri)
+
+ def play_stop_cb(self, path, value):
+ value = value[0]
+ if value:
+ print 'play'
+ self.player.set_state(gst.STATE_NULL)
+ self.player.set_state(gst.STATE_PLAYING)
+ else:
+ print 'stop'
+ self.player.set_state(gst.STATE_NULL)
+
+ def run(self):
+ self.mainloop.run()
+
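+# Usage sketch (assumes pyliblo is installed): start this script with a
+# URI, then toggle playback over OSC from another process, e.g.:
+#   import liblo
+#   liblo.send(liblo.Address(12345), "/play", 1)  # start
+#   liblo.send(liblo.Address(12345), "/play", 0)  # stop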
+if __name__ == '__main__':
+    # playbin expects a URI (e.g. file:///path/to/track.ogg), not a bare path
+    uri = sys.argv[-1]
+    player = AudioPlayer(uri)
+    player.start()
+
--- /dev/null
+# -*- coding: utf-8 -*-
+
+import sys, os
+from PyQt4 import QtCore, QtGui, uic
+from PyQt4.phonon import Phonon
+
+class AudioPlayer(QtGui.QWidget):
+ def __init__(self, url, parent = None):
+
+ self.url = url
+
+ QtGui.QWidget.__init__(self, parent)
+ self.setSizePolicy(QtGui.QSizePolicy.Expanding,
+ QtGui.QSizePolicy.Preferred)
+
+
+ self.player = Phonon.createPlayer(Phonon.MusicCategory,
+ Phonon.MediaSource(url))
+ self.player.setTickInterval(100)
+ self.player.tick.connect(self.tock)
+
+ self.play_pause = QtGui.QPushButton(self)
+ self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+ self.play_pause.clicked.connect(self.playClicked)
+ self.player.stateChanged.connect(self.stateChanged)
+
+ self.slider = Phonon.SeekSlider(self.player , self)
+
+ self.status = QtGui.QLabel(self)
+ self.status.setAlignment(QtCore.Qt.AlignRight |
+ QtCore.Qt.AlignVCenter)
+
+ self.download = QtGui.QPushButton("Download", self)
+ self.download.clicked.connect(self.fetch)
+
+ layout = QtGui.QHBoxLayout(self)
+ layout.addWidget(self.play_pause)
+ layout.addWidget(self.slider)
+ layout.addWidget(self.status)
+ layout.addWidget(self.download)
+
+ def playClicked(self):
+ if self.player.state() == Phonon.PlayingState:
+ self.player.pause()
+ else:
+ self.player.play()
+
+ def stateChanged(self, new, old):
+ if new == Phonon.PlayingState:
+ self.play_pause.setIcon(QtGui.QIcon(':/icons/player_pause.svg'))
+ else:
+ self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+
+    def tock(self, time):
+        # Phonon ticks are in milliseconds; convert to h:mm:ss
+        time = time / 1000
+        h = time / 3600
+        m = (time - 3600 * h) / 60
+        s = time - 3600 * h - m * 60
+        self.status.setText('%02d:%02d:%02d' % (h, m, s))
+
+ def fetch(self):
+ print 'Should download %s'%self.url
+
+def main():
+ app = QtGui.QApplication(sys.argv)
+    window = AudioPlayer(sys.argv[1])
+ window.show()
+ # It's exec_ because exec is a reserved word in Python
+ sys.exit(app.exec_())
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/usr/bin/env python
+
+"""A short Audio-Video example"""
+import gobject
+gobject.threads_init()
+import gst
+import pygtk
+pygtk.require("2.0")
+import gtk
+gtk.gdk.threads_init()
+import sys
+import os
+from demo import Demo
+
+def create_decodebin():
+    # prefer decodebin2, fall back to the older decodebin element
+    try:
+        return gst.element_factory_make("decodebin2")
+    except gst.ElementNotFoundError:
+        return gst.element_factory_make("decodebin")
+
+class DemoException(Exception):
+ """Base exception class for errors which occur during demos"""
+
+ def __init__(self, reason):
+ self.reason = reason
+
+class AVDemo(Demo):
+ """Extends base demo with both audio and video sinks
+ * a window containing a drawing area and basic media controls
+ * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
+ * connects the ximagesink to the window's drawing area
+
+ Derived classes need only override magic(), __name__,
+ and __usage__ to create new demos."""
+
+ __name__ = "AV Demo"
+ __usage__ = "python audio_video.py <filename>"
+ __def_win_size__ = (320, 240)
+
+    # this comment allows us to include only a portion of the file
+ # in the tutorial for this demo
+
+ def magic(self, pipeline, (videosink, audiosink), args):
+ """This is where the magic happens"""
+
+ def onPadAdded(source, pad):
+ # first we see if we can link to the videosink
+ tpad = videoqueue.get_compatible_pad(pad)
+ if tpad:
+ pad.link(tpad)
+ return
+ # if not, we try the audio sink
+ tpad = audioqueue.get_compatible_pad(pad)
+ if tpad:
+ pad.link(tpad)
+ return
+
+ src = gst.element_factory_make("filesrc", "src")
+ src.props.location = args[0]
+ dcd = create_decodebin()
+ audioqueue = gst.element_factory_make("queue")
+ videoqueue = gst.element_factory_make("queue")
+ pipeline.add(src, dcd, audioqueue, videoqueue)
+
+ src.link(dcd)
+ videoqueue.link(videosink)
+ audioqueue.link(audiosink)
+ dcd.connect("pad-added", onPadAdded)
+
+ def createPipeline(self, w):
+ """Given a window, creates a pipeline and connects it to the window"""
+
+ # code will make the ximagesink output in the specified window
+ def set_xid(window):
+ gtk.gdk.threads_enter()
+ videosink.set_xwindow_id(window.window.xid)
+ videosink.expose()
+ gtk.gdk.threads_leave()
+
+ # this code receives the messages from the pipeline. if we
+ # need to set X11 id, then we call set_xid
+ def bus_handler(unused_bus, message):
+ if message.type == gst.MESSAGE_ELEMENT:
+ if message.structure.get_name() == 'prepare-xwindow-id':
+ set_xid(w)
+ return gst.BUS_PASS
+
+ # create our pipeline, and connect our bus_handler
+ self.pipeline = gst.Pipeline()
+ bus = self.pipeline.get_bus()
+ bus.set_sync_handler(bus_handler)
+
+ videosink = gst.element_factory_make("ximagesink", "sink")
+ videosink.set_property("force-aspect-ratio", True)
+ videosink.set_property("handle-expose", True)
+ scale = gst.element_factory_make("videoscale", "scale")
+ cspace = gst.element_factory_make("ffmpegcolorspace", "cspace")
+
+ audiosink = gst.element_factory_make("autoaudiosink")
+ audioconvert = gst.element_factory_make("audioconvert")
+
+ # pipeline looks like: ... ! cspace ! scale ! sink
+ # ... ! audioconvert ! autoaudiosink
+ self.pipeline.add(cspace, scale, videosink, audiosink,
+ audioconvert)
+ scale.link(videosink)
+ cspace.link(scale)
+ audioconvert.link(audiosink)
+ return (self.pipeline, (cspace, audioconvert))
+
+# if this file is being run directly, create the demo and run it
+if __name__ == '__main__':
+ AVDemo().run()
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+"""A short Audio-Video example"""
+import gobject
+gobject.threads_init()
+import gst
+import pygtk
+pygtk.require("2.0")
+import gtk
+import sys
+import os
+from audio_video import AVDemo, create_decodebin
+
+class AVCrossfade(AVDemo):
+    """Crossfades audio and video between two sources.
+
+    Builds two decode chains and mixes the video through a videomixer
+    (per-source alpha) and the audio through an adder (per-source
+    volume); crossfade and balance sliders drive both.
+
+    Derived classes need only override magic(), __name__,
+    and __usage__ to create new demos."""
+
+    __name__ = "AV Crossfade Demo"
+    __usage__ = "python <script> <filenameA> <filenameB>"
+ __def_win_size__ = (320, 240)
+
+    # this comment allows us to include only a portion of the file
+ # in the tutorial for this demo
+
+ def onPad(self, decoder, pad, target):
+ tpad = target.get_compatible_pad(pad)
+ if tpad:
+ pad.link(tpad)
+
+ def addVideoChain(self, pipeline, name, decoder, mixer):
+ alpha = gst.element_factory_make("alpha")
+ alpha.props.alpha = 1.0
+ videoscale = gst.element_factory_make("videoscale")
+ videorate = gst.element_factory_make("videorate")
+ colorspace = gst.element_factory_make("ffmpegcolorspace")
+ queue = gst.element_factory_make("queue")
+
+ pipeline.add(alpha, videoscale, videorate, colorspace, queue)
+ decoder.connect("pad-added", self.onPad, videorate)
+ videorate.link(videoscale)
+ videoscale.link(colorspace)
+ colorspace.link(queue)
+ queue.link(alpha)
+ alpha.link(mixer)
+
+ setattr(self, "alpha%s" % name, alpha)
+
+ def addAudioChain(self, pipeline, name, decoder, adder):
+ volume = gst.element_factory_make("volume")
+ volume.props.volume = 0.5
+ audioconvert = gst.element_factory_make("audioconvert")
+ audiorate = gst.element_factory_make("audioresample")
+ queue = gst.element_factory_make("queue")
+
+ pipeline.add(volume, audioconvert, audiorate, queue)
+ decoder.connect("pad-added", self.onPad, audioconvert)
+ audioconvert.link(audiorate)
+ audiorate.link(queue)
+ queue.link(volume)
+ volume.link(adder)
+
+ setattr(self, "vol%s" % name, volume)
+
+ def addSourceChain(self, pipeline, name, filename, mixer, adder):
+ src = gst.element_factory_make("filesrc")
+ src.props.location = filename
+ dcd = create_decodebin()
+
+ pipeline.add(src, dcd)
+ src.link(dcd)
+ self.addVideoChain(pipeline, name, dcd, mixer)
+ self.addAudioChain(pipeline, name, dcd, adder)
+
+ def magic(self, pipeline, (videosink, audiosink), args):
+ """This is where the magic happens"""
+ mixer = gst.element_factory_make("videomixer")
+ adder = gst.element_factory_make("adder")
+ pipeline.add(mixer, adder)
+
+ mixer.link(videosink)
+ adder.link(audiosink)
+ self.addSourceChain(pipeline, "A", args[0], mixer, adder)
+ self.addSourceChain(pipeline, "B", args[1], mixer, adder)
+ self.alphaB.props.alpha = 0.5
+
+ def onValueChanged(self, adjustment):
+ balance = self.balance.get_value()
+ crossfade = self.crossfade.get_value()
+ self.volA.props.volume = (2 - balance) * (1 - crossfade)
+ self.volB.props.volume = balance * crossfade
+ self.alphaB.props.alpha = crossfade
+
+ def customWidgets(self):
+ self.crossfade = gtk.Adjustment(0.5, 0, 1.0)
+ self.balance = gtk.Adjustment(1.0, 0.0, 2.0)
+ crossfadeslider = gtk.HScale(self.crossfade)
+ balanceslider = gtk.HScale(self.balance)
+ self.crossfade.connect("value-changed", self.onValueChanged)
+ self.balance.connect("value-changed", self.onValueChanged)
+
+ ret = gtk.Table()
+ ret.attach(gtk.Label("Crossfade"), 0, 1, 0, 1)
+ ret.attach(crossfadeslider, 1, 2, 0, 1)
+ ret.attach(gtk.Label("Balance"), 0, 1, 1, 2)
+ ret.attach(balanceslider, 1, 2, 1, 2)
+ return ret
+
+# if this file is being run directly, create the demo and run it
+if __name__ == '__main__':
+ AVCrossfade().run()
\ No newline at end of file
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+
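+# Three cascaded videomixers: mix2 feeds mix1, which feeds mix0 and the
+# display. Controllers then animate ypos and alpha on the mix1 and mix2
+# sink pads over the first five seconds.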
+p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
+ videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
+ videomixer name=mix1 ! mix0.sink_1
+ videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix1.sink_0
+ videomixer name=mix2 ! mix1.sink_1
+ videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_0
+ videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_1
+""")
+
+m1 = p.get_by_name ("mix1")
+s1_0 = m1.get_pad ("sink_0")
+s1_0.set_property ("xpos", 100)
+s1_1 = m1.get_pad ("sink_1")
+s1_1.set_property ("xpos", 250)
+
+m2 = p.get_by_name ("mix2")
+s2_0 = m2.get_pad ("sink_0")
+s2_0.set_property ("xpos", 200)
+s2_1 = m2.get_pad ("sink_1")
+s2_1.set_property ("xpos", 250)
+
+c1_0 = gst.Controller(s1_0, "ypos", "alpha")
+c1_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c1_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c1_0.set("ypos", 0, 0)
+c1_0.set("ypos", 5 * gst.SECOND, 200)
+c1_0.set("alpha", 0, 0)
+c1_0.set("alpha", 5 * gst.SECOND, 1.0)
+
+c1_1 = gst.Controller(s1_1, "ypos", "alpha")
+c1_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c1_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c1_1.set("ypos", 0, 0)
+c1_1.set("ypos", 5 * gst.SECOND, 200)
+c1_1.set("alpha", 0, 0)
+c1_1.set("alpha", 5 * gst.SECOND, 1.0)
+
+c2_0 = gst.Controller(s2_0, "ypos", "alpha")
+c2_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c2_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c2_0.set("ypos", 0, 0)
+c2_0.set("ypos", 5 * gst.SECOND, 200)
+c2_0.set("alpha", 0, 0)
+c2_0.set("alpha", 5 * gst.SECOND, 1.0)
+
+c2_1 = gst.Controller(s2_1, "ypos", "alpha")
+c2_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c2_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c2_1.set("ypos", 0, 0)
+c2_1.set("ypos", 5 * gst.SECOND, 200)
+c2_1.set("alpha", 0, 0)
+c2_1.set("alpha", 5 * gst.SECOND, 1.0)
+
+p.set_state (gst.STATE_PLAYING)
+
+gobject.MainLoop().run()
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+ def __init__(self, port):
+ Thread.__init__(self)
+ import liblo
+ self.port = port
+ try:
+ self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+            raise
+
+ def add_method(self, path, type, method):
+ self.server.add_method(path, type, method)
+
+ def run(self):
+ while True:
+ self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+ def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
+ self.framerate = framerate
+ self.width = width
+ self.height = height
+ self.xpos = xpos
+ self.ypos = ypos
+ if not pipe:
+ pipe = 'videotestsrc pattern="snow"'
+ self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+ % (self.framerate, str(self.width), str(self.height))
+
+class GSTMixer(object):
+
+ def __init__(self, osc_port=13000):
+ self.name = 'mixer'
+ self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+ self.srcs = []
+        self.i = 0
+
+ self.osc_port = osc_port
+ self.osc = OSCController(self.osc_port)
+
+ def osc_callback(self, path, value):
+ paths = path.split('/')
+ sink = paths[1]
+ param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        else:
+            return  # no source matches this sink; ignore the message
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+ def add_src(self, src):
+ self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+ self.i += 1
+
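+    # Build the launch string (sources are wired in reverse order of
+    # addition), attach a gst.Controller to every mixer sink pad and
+    # register one OSC path per property: /sink_N/xpos, /sink_N/ypos,
+    # /sink_N/alpha.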
+ def setup(self):
+ self.srcs.reverse()
+
+ for src in self.srcs:
+ self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+ print ' '.join(self.pipe)
+ self.process = gst.parse_launch(' '.join(self.pipe))
+ mixer = self.process.get_by_name("mixer")
+
+ for src in self.srcs:
+ src['pad'] = mixer.get_pad(src['sink'])
+ src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+ src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+ self.osc.add_method('/'+src['sink']+'/xpos', 'i', self.osc_callback)
+
+ src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+ self.osc.add_method('/'+src['sink']+'/ypos', 'i', self.osc_callback)
+
+ src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+ src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+ self.osc.add_method('/'+src['sink']+'/alpha', 'f', self.osc_callback)
+
+
+ def run(self):
+ self.osc.start()
+ self.process.set_state(gst.STATE_PLAYING)
+ gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+ src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc')
+ src2 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=100, ypos=50)
+ src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+ src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+ mixer = GSTMixer()
+ mixer.add_src(src1)
+ mixer.add_src(src2)
+ mixer.add_src(src3)
+ mixer.add_src(src4)
+ mixer.setup()
+ mixer.run()
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+ def __init__(self, port):
+ Thread.__init__(self)
+ import liblo
+ self.port = port
+ try:
+ self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+            raise
+
+ def add_method(self, path, type, method):
+ self.server.add_method(path, type, method)
+
+ def run(self):
+ while True:
+ self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+ def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
+ self.framerate = framerate
+ self.width = width
+ self.height = height
+ self.xpos = xpos
+ self.ypos = ypos
+ if not pipe:
+ pipe = 'videotestsrc pattern="snow"'
+ self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+ % (self.framerate, str(self.width), str(self.height))
+
+class GSTMixer(object):
+
+ def __init__(self, osc_port=8338):
+ self.name = 'mixer'
+ self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+ self.srcs = []
+        self.i = 0
+
+ self.osc_port = osc_port
+ self.osc = OSCController(self.osc_port)
+
+ def osc_callback(self, path, value):
+ paths = path.split('/')
+ sink = paths[1]
+ param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        else:
+            return  # no source matches this sink; ignore the message
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
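+    # /1/fader<N> (float) drives the alpha of the source with id N-1;
+    # the paths look like those of a TouchOSC layout.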
+ def osc_alpha_callback(self, path, value):
+ paths = path.split('/')
+ layer = paths[1]
+ param = paths[2]
+ id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        else:
+            return  # unknown fader index; ignore the message
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+ def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        else:
+            return  # no source with id 2
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+ def add_src(self, src):
+ self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+ self.i += 1
+
+ def setup(self):
+ self.srcs.reverse()
+
+ for src in self.srcs:
+ self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+ print ' '.join(self.pipe)
+ self.process = gst.parse_launch(' '.join(self.pipe))
+ mixer = self.process.get_by_name("mixer")
+
+ for src in self.srcs:
+ src['pad'] = mixer.get_pad(src['sink'])
+ src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+ src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+ src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+ src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+ src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+ self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+ self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+ def run(self):
+ self.osc.start()
+ self.process.set_state(gst.STATE_PLAYING)
+ gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+ src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
+ src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
+ src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+ src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+ mixer = GSTMixer()
+ mixer.add_src(src1)
+ mixer.add_src(src2)
+ mixer.add_src(src3)
+ mixer.add_src(src4)
+ mixer.setup()
+ mixer.run()
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+ def __init__(self, port):
+ Thread.__init__(self)
+ import liblo
+ self.port = port
+ try:
+ self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+            raise
+
+ def add_method(self, path, type, method):
+ self.server.add_method(path, type, method)
+
+ def run(self):
+ while True:
+ self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+ def __init__(self, pipe=None, framerate='{30/1}', width=160, height=90, xpos=0, ypos=0):
+ self.framerate = framerate
+ self.width = width
+ self.height = height
+ self.xpos = xpos
+ self.ypos = ypos
+ if not pipe:
+ pipe = 'videotestsrc pattern="snow"'
+ self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+ % (self.framerate, str(self.width), str(self.height))
+
+
+class GSTWebmHttpStreamer(object):
+
+ def __init__(self, protocol='tcp', port=9000):
+ self.protocol = protocol
+ self.port = port
+
+
+class GSTMixer(object):
+
+ def __init__(self, osc_port=8338):
+ self.name = 'mixer'
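+        # a queue decouples the mixer from the sink; sync=false keeps the
+        # sink from waiting on timestamps coming from live sources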
+ self.pipe = ['videomixer name=mixer ! queue ! ffmpegcolorspace ! xvimagesink sync=false']
+ self.srcs = []
+        self.i = 0
+
+ self.osc_port = osc_port
+ self.osc = OSCController(self.osc_port)
+
+ def osc_callback(self, path, value):
+ paths = path.split('/')
+ sink = paths[1]
+ param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        else:
+            return  # no source matches this sink; ignore the message
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+ def osc_alpha_callback(self, path, value):
+ paths = path.split('/')
+ layer = paths[1]
+ param = paths[2]
+ id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        else:
+            return  # unknown fader index; ignore the message
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+ def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        else:
+            return  # no source with id 2
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+ def add_src(self, src):
+ self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+ self.i += 1
+
+ def setup(self):
+ self.srcs.reverse()
+
+ for src in self.srcs:
+ self.pipe.append(' '.join([src['src'].pipe, '! queue ! ' + self.name + '.' + src['sink']]))
+
+ print ' '.join(self.pipe)
+ self.process = gst.parse_launch(' '.join(self.pipe))
+ mixer = self.process.get_by_name("mixer")
+
+ for src in self.srcs:
+ src['pad'] = mixer.get_pad(src['sink'])
+ src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+ src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+ src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+ src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+ src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+ self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+ self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+ def run(self):
+ self.osc.start()
+ self.process.set_state(gst.STATE_PLAYING)
+ gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+ src1 = GSTSrcVideo(width=800, height=600, pipe='videotestsrc pattern="black"')
+ src2 = GSTSrcVideo(width=800, height=600, pipe='videotestsrc ')
+ src3 = GSTSrcVideo(width=640, height=480, xpos=200, ypos=150, pipe='v4l2src device=/dev/video0')
+ src4 = GSTSrcVideo(width=160, height=90, xpos=300, ypos=250)
+ mixer = GSTMixer()
+ mixer.add_src(src1)
+ mixer.add_src(src2)
+ mixer.add_src(src3)
+ mixer.add_src(src4)
+ mixer.setup()
+ mixer.run()
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+ def __init__(self, port):
+ Thread.__init__(self)
+ import liblo
+ self.port = port
+ try:
+ self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+            raise
+
+ def add_method(self, path, type, method):
+ self.server.add_method(path, type, method)
+
+ def run(self):
+ while True:
+ self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+ def __init__(self, pipe=None, framerate='{30/1}', width=160, height=90, xpos=0, ypos=0, queue_option=''):
+ self.framerate = framerate
+ self.width = width
+ self.height = height
+ self.xpos = xpos
+ self.ypos = ypos
+ self.queue_option = queue_option
+
+ if not pipe:
+ pipe = 'videotestsrc pattern="snow"'
+ self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+ % (self.framerate, str(self.width), str(self.height))
+
+class GSTWebmHttpStreamer(object):
+
+ def __init__(self, protocol='tcp', port=9000):
+ self.protocol = protocol
+ self.port = port
+
+
+class GSTMixer(object):
+
+ def __init__(self, osc_port=8338):
+ self.name = 'mixer'
+ self.pipe = ['videomixer name=mixer ! queue ! ffmpegcolorspace ! xvimagesink sync=false']
+ self.srcs = []
+        self.i = 0
+
+ self.osc_port = osc_port
+ self.osc = OSCController(self.osc_port)
+
+ def osc_callback(self, path, value):
+ paths = path.split('/')
+ sink = paths[1]
+ param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        else:
+            return  # no source matches this sink; ignore the message
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+ def osc_alpha_callback(self, path, value):
+ paths = path.split('/')
+ layer = paths[1]
+ param = paths[2]
+ id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        else:
+            return  # unknown fader index; ignore the message
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+ def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        else:
+            return  # no source with id 2
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+ def add_src(self, src):
+ self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+ self.i += 1
+
+ def setup(self):
+ self.srcs.reverse()
+
+ for src in self.srcs:
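+            # optional per-source queue options (e.g. leaky=upstream with a
+            # min-threshold-time) delay or drop frames from live inputs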
+ queue = 'queue'
+ if src['src'].queue_option:
+ # queue = 'timeoverlay ! queue'
+ queue += ' ' + src['src'].queue_option
+ self.pipe.append(' '.join([src['src'].pipe, '! ' + queue + ' ! ' + self.name + '.' + src['sink']]))
+
+ print ' '.join(self.pipe)
+ self.process = gst.parse_launch(' '.join(self.pipe))
+ mixer = self.process.get_by_name("mixer")
+
+ for src in self.srcs:
+ src['pad'] = mixer.get_pad(src['sink'])
+ src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+ src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+ src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+ src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+ src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+ self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+ self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+ def run(self):
+ self.osc.start()
+ self.process.set_state(gst.STATE_PLAYING)
+ gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+ src1 = GSTSrcVideo(width=640, height=480, pipe='videotestsrc pattern="black"')
+ src4 = GSTSrcVideo(width=640, height=480, pipe='videotestsrc ')
+ src3 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='v4l2src device=/dev/video0 do-timestamp=true', queue_option='leaky=upstream min-threshold-time=10000000000')
+ src2 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='souphttpsrc location=http://192.168.0.15:8080/videofeed do-timestamp=true ! jpegdec ! queue ! ffmpegcolorspace ! videorate')
+ mixer = GSTMixer()
+ mixer.add_src(src1)
+ mixer.add_src(src2)
+ mixer.add_src(src3)
+ mixer.add_src(src4)
+ mixer.setup()
+ mixer.run()
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+ def __init__(self, port):
+ Thread.__init__(self)
+ import liblo
+ self.port = port
+ try:
+ self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+            raise
+
+ def add_method(self, path, type, method):
+ self.server.add_method(path, type, method)
+
+ def run(self):
+ while True:
+ self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+ def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
+ self.framerate = framerate
+ self.width = width
+ self.height = height
+ self.xpos = xpos
+ self.ypos = ypos
+ if not pipe:
+ pipe = 'videotestsrc pattern="snow"'
+ self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+ % (self.framerate, str(self.width), str(self.height))
+
+
+class GSTWebmHttpStreamer(object):
+
+ def __init__(self, protocol='tcp', port=9000):
+ self.protocol = protocol
+ self.port = port
+
+
+class GSTMixer(object):
+
+ def __init__(self, osc_port=8338):
+ self.name = 'mixer'
+ self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+ self.srcs = []
+        self.i = 0
+
+ self.osc_port = osc_port
+ self.osc = OSCController(self.osc_port)
+
+ def osc_callback(self, path, value):
+ paths = path.split('/')
+ sink = paths[1]
+ param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        else:
+            return  # no source matches this sink; ignore the message
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+ def osc_alpha_callback(self, path, value):
+ paths = path.split('/')
+ layer = paths[1]
+ param = paths[2]
+ id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        else:
+            return  # unknown fader index; ignore the message
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+ def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        else:
+            return  # no source with id 2
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+ def add_src(self, src):
+ self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+ self.i += 1
+
+ def setup(self):
+ self.srcs.reverse()
+
+ for src in self.srcs:
+ self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+ print ' '.join(self.pipe)
+ self.process = gst.parse_launch(' '.join(self.pipe))
+ mixer = self.process.get_by_name("mixer")
+
+ for src in self.srcs:
+ src['pad'] = mixer.get_pad(src['sink'])
+ src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+ src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+ src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+ src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+ src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+ src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+ self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+ self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+ def run(self):
+ self.osc.start()
+ self.process.set_state(gst.STATE_PLAYING)
+ gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+ src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
+ src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
+ src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+ src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+ mixer = GSTMixer()
+ mixer.add_src(src1)
+ mixer.add_src(src2)
+ mixer.add_src(src3)
+ mixer.add_src(src4)
+ mixer.setup()
+ mixer.run()
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+
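+# One videomixer with four sink pads; Controllers set each overlay's
+# position and alpha once at t=0, so the layout is static.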
+p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
+ videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_3
+ videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_2
+ videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_1
+ videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
+""")
+
+m1 = p.get_by_name("mix0")
+
+s1_1 = m1.get_pad("sink_1")
+c1_1 = gst.Controller(s1_1, "xpos", "ypos", "alpha")
+c1_1.set("xpos", 0, 0)
+c1_1.set("ypos", 0, 0)
+c1_1.set("alpha", 0, 1.0)
+
+s1_2 = m1.get_pad("sink_2")
+c1_2 = gst.Controller(s1_2, "xpos", "ypos", "alpha")
+c1_2.set("xpos", 0, 200)
+c1_2.set("ypos", 0, 200)
+c1_2.set("alpha", 0, 1.0)
+
+s1_3 = m1.get_pad("sink_3")
+c1_3 = gst.Controller(s1_3, "xpos", "ypos", "alpha")
+c1_3.set("xpos", 0, 400)
+c1_3.set("ypos", 0, 0)
+c1_3.set("alpha", 0, 1.0)
+
+p.set_state(gst.STATE_PLAYING)
+
+gobject.MainLoop().run()
--- /dev/null
+# This file is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This file is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this file. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+
+import zlib
+
+import pygst
+pygst.require("0.10")
+import gst
+
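+# Decode an audio file to raw integer PCM through an appsink and fold
+# every buffer into a CRC32, e.g. to compare two rips of the same track.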
+def main(path):
+ pipeline = gst.parse_launch('''
+ filesrc location="%s" !
+ decodebin ! audio/x-raw-int !
+ appsink name=sink sync=False''' % path)
+ sink = pipeline.get_by_name('sink')
+
+ pipeline.set_state(gst.STATE_PLAYING)
+ crc = 0
+
+    while True:
+        try:
+            buf = sink.emit('pull-buffer')
+        except SystemError, e:
+            # pygst has been seen to raise SystemError from 'pull-buffer'
+            # around EOS; treat it as end of stream
+            print 'SystemError', e
+            break
+        if buf is None:
+            # appsink returns None once the stream has ended
+            break
+
+        # the samples should be CD audio (16-bit stereo), so every
+        # buffer should be a multiple of 4 bytes
+        assert len(buf) % 4 == 0, "buffer is not a multiple of 4 bytes"
+        crc = zlib.crc32(buf, crc)
+
+    # zlib.crc32 returns a signed int on Python 2; normalize to unsigned
+    crc = crc % 2 ** 32
+ print "CRC: %08X" % crc
+
+
+path = 'test.flac'
+
+try:
+ path = sys.argv[1]
+except IndexError:
+ pass
+
+main(path)
--- /dev/null
+#!/usr/bin/env python
+"""Extends basic demo with a gnl composition"""
+import gobject
+gobject.threads_init()
+from demo import Demo, DemoException
+import gtk
+import gst
+import sys
+import os
+
+def create_decodebin():
+    # prefer decodebin2, fall back to the older decodebin element
+    try:
+        return gst.element_factory_make("decodebin2")
+    except gst.ElementNotFoundError:
+        return gst.element_factory_make("decodebin")
+
+class SimpleCrossfadeDemo(Demo):
+    __name__ = "Demo of crossfade without using gnonlin"
+ __usage__ = '''python %s sourceA sourceB
+ live crossfading between two sources''' % sys.argv[0]
+    __def_win_size__ = (320, 420)
+
+ def magic(self, pipeline, sink, args):
+
+ def onPad(obj, pad, target):
+ sinkpad = target.get_compatible_pad(pad, pad.get_caps())
+ if sinkpad:
+ pad.link(sinkpad)
+ return True
+
+ assert len(sys.argv) == 3
+ assert os.path.exists(sys.argv[-1])
+ assert os.path.exists(sys.argv[-2])
+
+ # <excerpt 1>
+        srcA = gst.element_factory_make("filesrc")
+        srcA.set_property("location", sys.argv[-1])
+
+ srcAdecode = create_decodebin()
+ srcAconvert = gst.element_factory_make("ffmpegcolorspace")
+ srcAalpha = gst.element_factory_make("alpha")
+ srcAalpha.set_property("alpha", 1.0)
+
+ srcB = gst.element_factory_make("filesrc")
+ srcB.set_property("location", sys.argv[-2])
+ srcBdecode = create_decodebin()
+ srcBconvert = gst.element_factory_make("ffmpegcolorspace")
+ srcBalpha = gst.element_factory_make("alpha")
+ srcBalpha.set_property("alpha", 0.5)
+
+ mixer = gst.element_factory_make("videomixer")
+ mixer.set_property("background", "black")
+ # </excerpt>
+
+ # <excerpt 2>
+ pipeline.add(mixer)
+
+        pipeline.add(srcA, srcAdecode, srcAconvert, srcAalpha)
+        srcA.link(srcAdecode)
+ srcAdecode.connect("pad-added", onPad, srcAconvert)
+ srcAconvert.link(srcAalpha)
+ srcAalpha.link(mixer)
+
+ pipeline.add(srcB, srcBdecode, srcBconvert, srcBalpha)
+ srcB.link(srcBdecode)
+ srcBdecode.connect("pad-added", onPad, srcBconvert)
+ srcBconvert.link(srcBalpha)
+ srcBalpha.link(mixer)
+
+ mixer.link(sink)
+
+ # remember the alpha elements
+ self.srcBalpha = srcBalpha
+ # </excerpt>
+
+
+ # overriding from parent
+ def customWidgets(self):
+ """Create a control for each property in the videobalance
+ widget"""
+
+ # <excerpt 3>
+        # called when the slider value changes
+ def onValueChanged(widget):
+ if self.srcBalpha:
+ self.srcBalpha.set_property("alpha", widget.get_value())
+ # </excerpt>
+
+ lower = 0
+ upper = 1
+ default = 0.5
+
+ # create a place to hold our controls
+ controls = gtk.VBox()
+ labels = gtk.VBox()
+
+ widget = gtk.HScale(); label = gtk.Label("Crossfade")
+
+        # set appropriate attributes
+ widget.set_update_policy(gtk.UPDATE_CONTINUOUS)
+ widget.set_draw_value(True)
+ widget.set_range(lower, upper)
+ widget.set_value(default)
+
+ # connect to our signal handler, specifying the property
+ # to adjust
+ widget.connect("value-changed", onValueChanged)
+
+ # pack widget into box
+ controls.pack_start(widget, True, True)
+ labels.pack_start(label, True, False)
+
+ layout = gtk.HBox()
+ layout.pack_start(labels, False, False)
+ layout.pack_end(controls, True, True)
+ return layout
+
+if __name__ == '__main__':
+ SimpleCrossfadeDemo().run()
\ No newline at end of file
--- /dev/null
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+
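+# Minimal videomixer animation: a snow test picture slides down and
+# fades in over the first five seconds.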
+p = gst.parse_launch ("""videomixer name=mix ! ffmpegcolorspace ! xvimagesink
+ videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=10/1, width=200, height=150 ! mix.sink_0
+ videotestsrc ! video/x-raw-yuv, framerate=10/1, width=640, height=360 ! mix.sink_1
+""")
+
+m = p.get_by_name ("mix")
+s0 = m.get_pad ("sink_0")
+s0.set_property ("xpos", 100)
+
+control = gst.Controller(s0, "ypos", "alpha")
+control.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+control.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+control.set("ypos", 0, 0); control.set("ypos", 5 * gst.SECOND, 200)
+control.set("alpha", 0, 0); control.set("alpha", 5 * gst.SECOND, 1.0)
+
+p.set_state (gst.STATE_PLAYING)
+
+gobject.MainLoop().run()
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+"""Basic Framework for writing GStreamer Demos in Python"""
+#<excerpt 2>
+import gobject
+gobject.threads_init()
+import gst
+#</excerpt>
+import pygtk
+pygtk.require("2.0")
+import gtk
+gtk.gdk.threads_init()
+import sys
+import os
+
+
+class DemoException(Exception):
+ """Base exception class for errors which occur during demos"""
+
+ def __init__(self, reason):
+ self.reason = reason
+
+class Demo:
+ """Base class implementing boring, boiler-plate code.
+ Sets up a basic gstreamer environment which includes:
+
+ * a window containing a drawing area and basic media controls
+ * a basic gstreamer pipeline using an ximagesink
+ * connects the ximagesink to the window's drawing area
+
+ Derived classes need only override magic(), __name__,
+ and __usage__ to create new demos."""
+
+ __name__ = "Basic Demo"
+ __usage__ = "python demo.py -- runs a simple test demo"
+ __def_win_size__ = (320, 240)
+
+    # this comment allows us to include only a portion of the file
+ # in the tutorial for this demo
+ # <excerpt 1> ...
+
+ def magic(self, pipeline, sink, args):
+ """This is where the magic happens"""
+ src = gst.element_factory_make("videotestsrc", "src")
+ pipeline.add(src)
+ src.link(sink)
+
+
+ def createPipeline(self, w):
+ """Given a window, creates a pipeline and connects it to the window"""
+
+ # code will make the ximagesink output in the specified window
+ def set_xid(window):
+ gtk.gdk.threads_enter()
+ sink.set_xwindow_id(window.window.xid)
+ sink.expose()
+ gtk.gdk.threads_leave()
+
+ # this code receives the messages from the pipeline. if we
+ # need to set X11 id, then we call set_xid
+ def bus_handler(unused_bus, message):
+ if message.type == gst.MESSAGE_ELEMENT:
+ if message.structure.get_name() == 'prepare-xwindow-id':
+ set_xid(w)
+ return gst.BUS_PASS
+
+ # create our pipeline, and connect our bus_handler
+ self.pipeline = gst.Pipeline()
+ bus = self.pipeline.get_bus()
+ bus.set_sync_handler(bus_handler)
+
+ sink = gst.element_factory_make("ximagesink", "sink")
+ sink.set_property("force-aspect-ratio", True)
+ sink.set_property("handle-expose", True)
+ scale = gst.element_factory_make("videoscale", "scale")
+ cspace = gst.element_factory_make("ffmpegcolorspace", "cspace")
+
+ # our pipeline looks like this: ... ! cspace ! scale ! sink
+ self.pipeline.add(cspace, scale, sink)
+ scale.link(sink)
+ cspace.link(scale)
+ return (self.pipeline, cspace)
+
+ # ... end of excerpt </excerpt>
+
+ # subclasses can override this method to provide custom controls
+ def customWidgets(self):
+ return gtk.HBox()
+
+ def createWindow(self):
+        """Creates a top-level window, sets various boring attributes,
+        creates a place to put the video sink, adds some basic media
+        controls, and finally connects some basic signal handlers.
+        Really, really boring.
+        """
+
+ # create window, set basic attributes
+ w = gtk.Window()
+ w.set_size_request(*self.__def_win_size__)
+ w.set_title("Gstreamer " + self.__name__)
+ w.connect("destroy", gtk.main_quit)
+
+ # declare buttons and their associated handlers
+ controls = (
+ ("play_button", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),
+ ("stop_button", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),
+ ("quit_button", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)
+ )
+
+ # as well as the container in which to put them
+ box = gtk.HButtonBox()
+
+ # for every widget, connect to its clicked signal and add it
+ # to the enclosing box
+ for name, widget, handler in controls:
+ widget.connect("clicked", handler)
+ box.pack_start(widget, True)
+ setattr(self, name, widget)
+
+ viewer = gtk.DrawingArea()
+ viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)
+
+ # we will need this later
+ self.xid = None
+
+ # now finally do the top-level layout for the window
+ layout = gtk.VBox(False)
+ layout.pack_start(viewer)
+
+        # subclasses can override customWidgets() to supply
+        # custom controls
+ layout.pack_start(self.customWidgets(), False, False)
+ layout.pack_end(box, False, False)
+ w.add(layout)
+ w.show_all()
+
+ # we want to return only the portion of the window which will
+ # be used to display the video, not the whole top-level
+ # window. a DrawingArea widget is, in fact, an X11 window.
+ return viewer
+
+ def onPlay(self, unused_button):
+ self.pipeline.set_state(gst.STATE_PLAYING)
+
+ def onStop(self, unused_button):
+ self.pipeline.set_state(gst.STATE_READY)
+
+ def run(self):
+ w = self.createWindow()
+ p, s = self.createPipeline(w)
+ try:
+ self.magic(p, s, sys.argv[1:])
+ gtk.main()
+ except DemoException, e:
+ print e.reason
+ print self.__usage__
+ sys.exit(-1)
+
+# if this file is being run directly, create the demo and run it
+if __name__ == '__main__':
+ Demo().run()
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+
+import sys, os
+import pygtk, gtk, gobject
+import pygst
+pygst.require("0.10")
+import gst
+
+class GTK_Main:
+
+ def __init__(self):
+ window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+ window.set_title("Vorbis-Player")
+ window.set_default_size(500, 200)
+ window.connect("destroy", gtk.main_quit, "WM destroy")
+ vbox = gtk.VBox()
+ window.add(vbox)
+ self.entry = gtk.Entry()
+ vbox.pack_start(self.entry, False)
+ self.button = gtk.Button("Start")
+ vbox.add(self.button)
+ self.button.connect("clicked", self.start_stop)
+ window.show_all()
+
+ self.player = gst.Pipeline("player")
+ source = gst.element_factory_make("filesrc", "file-source")
+ demuxer = gst.element_factory_make("oggdemux", "demuxer")
+ demuxer.connect("pad-added", self.demuxer_callback)
+ self.audio_decoder = gst.element_factory_make("vorbisdec", "vorbis-decoder")
+ audioconv = gst.element_factory_make("audioconvert", "converter")
+ audiosink = gst.element_factory_make("autoaudiosink", "audio-output")
+
+ self.player.add(source, demuxer, self.audio_decoder, audioconv, audiosink)
+ gst.element_link_many(source, demuxer)
+ gst.element_link_many(self.audio_decoder, audioconv, audiosink)
+
+ bus = self.player.get_bus()
+ bus.add_signal_watch()
+ bus.connect("message", self.on_message)
+
+ def start_stop(self, w):
+ if self.button.get_label() == "Start":
+ filepath = self.entry.get_text()
+ if os.path.isfile(filepath):
+ self.button.set_label("Stop")
+ self.player.get_by_name("file-source").set_property("location", filepath)
+ self.player.set_state(gst.STATE_PLAYING)
+ else:
+ self.player.set_state(gst.STATE_NULL)
+ self.button.set_label("Start")
+
+ def on_message(self, bus, message):
+ t = message.type
+ if t == gst.MESSAGE_EOS:
+ self.player.set_state(gst.STATE_NULL)
+ self.button.set_label("Start")
+ elif t == gst.MESSAGE_ERROR:
+ err, debug = message.parse_error()
+ print "Error: %s" % err, debug
+ self.player.set_state(gst.STATE_NULL)
+ self.button.set_label("Start")
+
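+    # oggdemux creates its source pad only once streaming starts, so the
+    # demuxer is linked to the vorbis decoder from this pad-added callback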
+ def demuxer_callback(self, demuxer, pad):
+ adec_pad = self.audio_decoder.get_pad("sink")
+ pad.link(adec_pad)
+
+gtk.gdk.threads_init()
+GTK_Main()
+gtk.main()
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+# send all messages to port 12345 on the local machine
+try:
+ target = liblo.Address(12345)
+except liblo.AddressError, err:
+ print str(err)
+ sys.exit()
+
+# send the message "/play" with a single int argument of 1 (start playback)
+liblo.send(target, "/play", 1)
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+# send all messages to port 12345 on the local machine
+try:
+ target = liblo.Address(12345)
+except liblo.AddressError, err:
+ print str(err)
+ sys.exit()
+
+# send the message "/play" with a single int argument of 0 (stop playback)
+liblo.send(target, "/play", 0)
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+# send all messages to port 13000 on the local machine
+try:
+ target = liblo.Address(13000)
+except liblo.AddressError, err:
+ print str(err)
+ sys.exit()
+
+# send the message "/sink_2/xpos" with a single int argument of 200
+liblo.send(target, "/sink_2/xpos", 200)
--- /dev/null
+import pygst
+pygst.require("0.10")
+import gst
+
+#pipeline = gst.Pipeline()
+playbin = gst.element_factory_make("playbin2", 'player')
+#sink = gst.element_factory_make("autoaudiosink", None)
+
+# playbin2 expects a URI, not a bare filesystem path
+playbin.set_property("uri", "file:///home/momo/music_local/test/sweep.wav")
+#playbin.set_property("uri", "file:///home/momo/video_local/webm/ocean-clip.webm")
+#playbin.set_property("audio-sink", sink)
+
+#pipeline.add(playbin)
+
+import time
+playbin.set_state(gst.STATE_PLAYING)
+time.sleep(200)
--- /dev/null
+#!/usr/bin/python
+
+import pygst
+pygst.require("0.10")
+import gst
+import pygtk
+import gtk
+import sys
+
+class Main:
+ def __init__(self):
+ #this just reads the command line args
+ try:
+ DELAY = float(sys.argv[1])
+ DELAY = long(DELAY * 1000000000)
+ print DELAY
+ except IndexError:
+ DELAY = 0
+
+ self.delay_pipeline = gst.Pipeline("mypipeline")
+ #ALSA
+ self.audiosrc = gst.element_factory_make("alsasrc", "audio")
+ self.audiosrc.set_property("device","default")
+ self.delay_pipeline.add(self.audiosrc)
+        #Queue: holding back min-threshold-time nanoseconds of audio creates the delay
+ self.audioqueue = gst.element_factory_make("queue","queue1")
+ self.audioqueue.set_property("max-size-time",0)
+ self.audioqueue.set_property("max-size-buffers",0)
+ self.audioqueue.set_property("max-size-bytes",0)
+ self.audioqueue.set_property("min-threshold-time",DELAY)
+ self.audioqueue.set_property("leaky","no")
+ self.delay_pipeline.add(self.audioqueue)
+ #Audio Output
+ self.sink = gst.element_factory_make("autoaudiosink", "sink")
+ self.delay_pipeline.add(self.sink)
+ #Link the elements
+ self.audiosrc.link(self.audioqueue)
+ self.audioqueue.link(self.sink)
+ #Begin Playing
+ self.delay_pipeline.set_state(gst.STATE_PLAYING)
+
+start=Main()
+gtk.main()
+
--- /dev/null
+#!/bin/sh
+
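+# Capture from v4l2, scale to 640x360, encode to H.264 and send RTP to
+# 127.0.0.1:5000, with RTCP sent on 5001 and received on 5002
+# (pair with the matching receiver script).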
+gst-launch -v gstrtpbin name=rtpbin \
+ v4l2src \
+ ! queue ! videoscale method=1 ! video/x-raw-yuv,width=640,height=360 \
+ ! queue ! x264enc byte-stream=true bitrate=1000 bframes=4 ref=4 me=hex subme=4 weightb=true threads=4 ! rtph264pay \
+ ! rtpbin.send_rtp_sink_0 \
+ rtpbin.send_rtp_src_0 ! udpsink port=5000 host=127.0.0.1 \
+ rtpbin.send_rtcp_src_0 ! udpsink port=5001 host=127.0.0.1 sync=false async=false \
+ udpsrc port=5002 ! rtpbin.recv_rtcp_sink_0 > /dev/null &
+
\ No newline at end of file
--- /dev/null
+#!/bin/sh
+
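+# Receive the H.264 RTP stream, decode and display it, and tee the
+# depayloaded bitstream to a file. rtph264depay outputs a raw H.264
+# elementary stream, so the file is .h264, not a playable .mp4.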
+gst-launch -v gstrtpbin name=rtpbin latency=200 \
+ udpsrc caps="application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" port=5000 \
+ ! rtpbin.recv_rtp_sink_0 \
+    rtpbin. ! rtph264depay ! tee name=t \
+    t. ! queue ! ffdec_h264 ! xvimagesink \
+ udpsrc port=5001 ! rtpbin.recv_rtcp_sink_0 \
+ rtpbin.send_rtcp_src_0 ! udpsink port=5002 host=127.0.0.1 sync=false async=false \
+    t. ! queue ! filesink location=/tmp/video.h264
--- /dev/null
+#!/usr/bin/env python
+"""Extends basic demo with a gnl composition"""
+
+from demo import Demo, DemoException
+import gtk
+import gst
+import sys
+import os
+
+def create_decodebin():
+    # prefer decodebin2, fall back to the older decodebin element
+    try:
+        return gst.element_factory_make("decodebin2")
+    except gst.ElementNotFoundError:
+        return gst.element_factory_make("decodebin")
+
+class SimpleEffectDemo(Demo):
+ __name__ = "Basic GStreamer Effect Demo"
+ __usage__ = '''python %s file
+ display file with a color_balance effect''' % sys.argv[0]
+ __def_win_size__ = (320, 500)
+ # <excerpt 1>
+ def magic(self, pipeline, sink, args):
+
+ def onPad(obj, pad, target):
+ sinkpad = target.get_compatible_pad(pad, pad.get_caps())
+ pad.link(sinkpad)
+ return True
+
+ assert os.path.exists(sys.argv[-1])
+
+ # create the following pipeline
+ # filesrc location = sys.argv[1] ! decodebin ! videobalance ! ...
+ src = gst.element_factory_make("filesrc")
+ src.set_property("location", sys.argv[-1])
+ decode = create_decodebin()
+
+ self.balance = gst.element_factory_make("videobalance")
+
+ pipeline.add(src, decode, self.balance)
+ src.link(decode)
+ decode.connect("pad-added", onPad, self.balance)
+ self.balance.link(sink)
+
+ return
+ # </excerpt>
+
+ # <excerpt 2>
+ # overriding from parent
+ def customWidgets(self):
+ """Create a control for each property in the videobalance
+ widget"""
+
+        # called when a slider value changes
+ def onValueChanged(widget, prop):
+ # set the corresponding property of the videobalance element
+ self.balance.set_property(prop, widget.get_value())
+
+ # videobalance has several properties, with the following range
+ # and defaults
+ properties = [("contrast", 0, 2, 1),
+ ("brightness", -1, 1, 0),
+ ("hue", -1, 1, 0),
+ ("saturation", 0, 2, 1)]
+
+ # create a place to hold our controls
+ controls = gtk.VBox()
+ labels = gtk.VBox()
+        # for every property, create a control and set its attributes
+ for prop, lower, upper, default in properties:
+ widget = gtk.HScale(); label = gtk.Label(prop)
+
+            # set appropriate attributes
+ widget.set_update_policy(gtk.UPDATE_CONTINUOUS)
+ widget.set_value(default)
+ widget.set_draw_value(True)
+ widget.set_range(lower, upper)
+
+ # connect to our signal handler, specifying the property
+ # to adjust
+ widget.connect("value-changed", onValueChanged, prop)
+
+ # pack widget into box
+ controls.pack_start(widget, True, True)
+ labels.pack_start(label, True, False)
+
+ layout = gtk.HBox()
+ layout.pack_start(labels, False, False)
+ layout.pack_end(controls, True, True)
+ return layout
+
+ # </excerpt>
+
+if __name__ == '__main__':
+ SimpleEffectDemo().run()
\ No newline at end of file
--- /dev/null
+import sys, os
+from PyQt4 import QtCore, QtGui, uic
+from PyQt4.phonon import Phonon
+
+class VideoPlayer(QtGui.QWidget):
+ def __init__(self, url, parent = None):
+
+ self.url = url
+
+ QtGui.QWidget.__init__(self, parent)
+ self.setSizePolicy(QtGui.QSizePolicy.Expanding,
+ QtGui.QSizePolicy.Preferred)
+
+
+ self.player = Phonon.VideoPlayer(Phonon.VideoCategory,self)
+ self.player.load(Phonon.MediaSource(self.url))
+ self.player.mediaObject().setTickInterval(100)
+ self.player.mediaObject().tick.connect(self.tock)
+
+ self.play_pause = QtGui.QPushButton(self)
+ self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+ self.play_pause.clicked.connect(self.playClicked)
+ self.player.mediaObject().stateChanged.connect(self.stateChanged)
+
+ self.slider = Phonon.SeekSlider(self.player.mediaObject() , self)
+
+ self.status = QtGui.QLabel(self)
+ self.status.setAlignment(QtCore.Qt.AlignRight |
+ QtCore.Qt.AlignVCenter)
+
+ self.download = QtGui.QPushButton("Download", self)
+ self.download.clicked.connect(self.fetch)
+        topLayout = QtGui.QVBoxLayout(self)
+        topLayout.addWidget(self.player)
+        # the controls row must be created without a parent widget,
+        # since self already owns topLayout; it is nested below
+        layout = QtGui.QHBoxLayout()
+        layout.addWidget(self.play_pause)
+        layout.addWidget(self.slider)
+        layout.addWidget(self.status)
+        layout.addWidget(self.download)
+        topLayout.addLayout(layout)
+
+ def playClicked(self):
+ if self.player.mediaObject().state() == Phonon.PlayingState:
+ self.player.pause()
+ else:
+ self.player.play()
+
+ def stateChanged(self, new, old):
+ if new == Phonon.PlayingState:
+ self.play_pause.setIcon(QtGui.QIcon(':/icons/player_pause.svg'))
+ else:
+ self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+
+    def tock(self, time):
+        # Phonon ticks are in milliseconds; convert to h:mm:ss
+        time = time / 1000
+        h = time / 3600
+        m = (time - 3600 * h) / 60
+        s = time - 3600 * h - m * 60
+        self.status.setText('%02d:%02d:%02d' % (h, m, s))
+
+ def fetch(self):
+ print 'Should download %s'%self.url
+
+def main():
+ app = QtGui.QApplication(sys.argv)
+    window = VideoPlayer(sys.argv[1])
+ window.show()
+ # It's exec_ because exec is a reserved word in Python
+ sys.exit(app.exec_())
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+from PyQt4 import QtCore, QtGui
+import pygst
+import sys, os, time, math
+pygst.require("0.10")
+import gst
+import gobject
+
+#This class runs the code it contains in another thread using QThread
+class Player(QtCore.QThread):
+ def __init__(self):
+ QtCore.QThread.__init__(self)
+
+ def run(self):
+ #create the pipeline
+ player = gst.Pipeline("player")
+ #filesrc element
+ source = gst.element_factory_make("filesrc", "file-source")
+ #volume element to adjust volume of audio
+ volume = gst.element_factory_make("volume", "volume")
+ #level element to get the rms/peak property
+ level = gst.element_factory_make("level", "volume-level")
+ #decoder to play mp3 files
+ decoder = gst.element_factory_make("mad", "mp3-decoder")
+ #convert the audio to play to speakers
+ conv = gst.element_factory_make("audioconvert", "converter")
+ #autosink if not alsa
+ sink = gst.element_factory_make("autoaudiosink", "audio-output")
+
+ #add the elements to the pipeline
+ player.add(source, volume, level, decoder, conv, sink)
+
+ #link the elements in order
+ gst.element_link_many(source, decoder, conv, volume, level, sink)
+ #set properties of elements
+ player.get_by_name("volume").set_property('volume', 1)
+ player.get_by_name("volume-level").set_property('peak-ttl' , 0)
+ player.get_by_name("volume-level").set_property('peak-falloff', 20)
+ #add bus to listen signal from
+ bus = gst.Pipeline.get_bus(player)
+ gst.Bus.add_signal_watch(bus)
+
+ #the source of the player
+ filepath = "/home/momo/music_local/test/aboul.wav.mp3"
+ #set the property of the element filesrc
+ player.get_by_name("file-source").set_property('location', filepath)
+ #play the file
+ player.set_state(gst.STATE_PLAYING)
+ #get the current thread in Qt
+        play_thread_id = self.currentThread()
+
+ #set the minimum decibels
+ MIN_DB = -45
+ #set the maximum decibels
+ MAX_DB = 0
+ #if current thread is running
+        while play_thread_id == self.currentThread():
+ #listen to messages that emit during playing
+ messagePoll = bus.poll(gst.MESSAGE_ANY,-1)
+ #if the message is level
+ if messagePoll.src == level:
+ #get the structure of the message
+ struc = messagePoll.structure
+ #if the structure message is rms
+ if struc.has_key('rms'):
+ rms = struc["rms"]
+ #get the values of rms in a list
+ rms0 = abs(float(rms[0]))
+ #compute for rms to decibels
+ rmsdb = 10 * math.log(rms0 / 32768 )
+ #compute for progress bar
+ vlrms = (rmsdb-MIN_DB) * 100 / (MAX_DB-MIN_DB)
+ #emit the signal to the qt progress bar
+ self.emit(QtCore.SIGNAL("setLabel"), abs(vlrms))
+ #set timer
+ time.sleep(0.05)
+
+#this code produced using pyuic from qt designer
+class Ui_Dialog(object):
+ def setupUi(self, Dialog):
+ Dialog.setObjectName("Dialog")
+ Dialog.resize(QtCore.QSize(QtCore.QRect(0,0,94,300).size()).expandedTo(Dialog.minimumSizeHint()))
+
+ self.progressBar = QtGui.QProgressBar(Dialog)
+ self.progressBar.setGeometry(QtCore.QRect(10,10,31,281))
+ self.progressBar.setProperty("value",QtCore.QVariant(24))
+ self.progressBar.setOrientation(QtCore.Qt.Vertical)
+ self.progressBar.setObjectName("progressBar")
+ self.progressBar.setValue(0)
+ self.progressBar.setMinimum(0)
+ self.progressBar.setMaximum(100)
+
+ self.retranslateUi(Dialog)
+ QtCore.QMetaObject.connectSlotsByName(Dialog)
+    #sets the progress bar to the value emitted by the player thread
+ def setLabel(self,value):
+ self.progressBar.setValue(value)
+
+ def retranslateUi(self, Dialog):
+ Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
+
+if __name__ == "__main__":
+ app = QtGui.QApplication(sys.argv)
+ window = QtGui.QDialog()
+ ui = Ui_Dialog()
+ ui.setupUi(window)
+ window.show()
+ #creates instance of the Player class
+ player=Player()
+ #connect to signal emitted in Player class
+ QtCore.QObject.connect(player, QtCore.SIGNAL("setLabel"), ui.setLabel, QtCore.Qt.QueuedConnection)
+ #run the Player class thread
+ player.start()
+ app.exec_()