git.parisson.com Git - telecaster-server.git/commitdiff
mv tests
author Guillaume Pellerin <yomguy@parisson.com>
Fri, 1 Aug 2014 21:58:09 +0000 (23:58 +0200)
committer Guillaume Pellerin <yomguy@parisson.com>
Fri, 1 Aug 2014 21:58:09 +0000 (23:58 +0200)
130 files changed:
tcserver/conf/usr/local/share/telecaster/scripts/old/ffmpeg_dv_jack_fifo_shout [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/old/ffmpeg_usb_jack_fifo_shout [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_dv_jack_shout [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_osc_multi.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_double_shout [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_double_shout2.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_jack_fifo_shout [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_jack_shout [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_simple_ogg [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_simple_ogg_jack [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_triple_shout [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/osc_record_start.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/osc_record_stop.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_player_osc.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_player_qt.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_video.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_video_crossfade.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_1cam.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_2cam.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_3cams.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_parallel.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_parallel_no_effects.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_pipes.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/crc.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/cross-fade.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/cross-fade_2.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/demo.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/gtk_sink_pad.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_play.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_stop.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_test.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/playbin.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/radiodelay.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/rtpx264.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/rtpx264_pl.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/simple-effect-gtk.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/video_player_qt.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/py/vumeter.py [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/rtpx264.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/rtpx264_pl.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_flu_simple_webm.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_alsa_webm_stream.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_dv_webm_stream.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_only_simple_webm.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_file.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_file_webm.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_start.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_ice.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_alsa_test.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_jack_test.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_test.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_m.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_sd_test.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_tee.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/tcp2x.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_2.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_pl2.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_relay.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_relay_x.sh [deleted file]
tcserver/conf/usr/local/share/telecaster/scripts/tests/x_jack_webm.sh [deleted file]
tests/audio_player_osc.py [new file with mode: 0644]
tests/audio_player_qt.py [new file with mode: 0644]
tests/audio_video.py [new file with mode: 0644]
tests/audio_video_crossfade.py [new file with mode: 0644]
tests/control_mixer.py [new file with mode: 0644]
tests/control_mixer_osc.py [new file with mode: 0644]
tests/control_mixer_osc_touch.py [new file with mode: 0644]
tests/control_mixer_osc_touch_1cam.py [new file with mode: 0644]
tests/control_mixer_osc_touch_2cam.py [new file with mode: 0644]
tests/control_mixer_osc_touch_3cams.py [new file with mode: 0644]
tests/control_mixer_parallel.py [new file with mode: 0644]
tests/control_mixer_parallel_no_effects.py [new file with mode: 0644]
tests/control_mixer_pipes.py [new file with mode: 0644]
tests/crc.py [new file with mode: 0644]
tests/cross-fade.py [new file with mode: 0644]
tests/cross-fade_2.py [new file with mode: 0644]
tests/demo.py [new file with mode: 0644]
tests/ffmpeg_dv_jack_fifo_shout [new file with mode: 0755]
tests/ffmpeg_usb_jack_fifo_shout [new file with mode: 0755]
tests/gst_dv_jack_shout [new file with mode: 0755]
tests/gst_osc_multi.py [new file with mode: 0755]
tests/gst_video_double_shout [new file with mode: 0755]
tests/gst_video_double_shout2.sh [new file with mode: 0755]
tests/gst_video_jack_fifo_shout [new file with mode: 0755]
tests/gst_video_jack_shout [new file with mode: 0755]
tests/gst_video_simple_ogg [new file with mode: 0755]
tests/gst_video_simple_ogg_jack [new file with mode: 0755]
tests/gst_video_triple_shout [new file with mode: 0755]
tests/gtk_sink_pad.py [new file with mode: 0644]
tests/osc_play.py [new file with mode: 0644]
tests/osc_record_start.py [new file with mode: 0755]
tests/osc_record_stop.py [new file with mode: 0755]
tests/osc_stop.py [new file with mode: 0644]
tests/osc_test.py [new file with mode: 0644]
tests/playbin.py [new file with mode: 0644]
tests/radiodelay.py [new file with mode: 0644]
tests/rtpx264.sh [new file with mode: 0755]
tests/rtpx264_2.sh [new file with mode: 0755]
tests/rtpx264_pl.sh [new file with mode: 0755]
tests/rtpx264_pl_fs.sh [new file with mode: 0755]
tests/simple-effect-gtk.py [new file with mode: 0644]
tests/tc_flu_simple_webm.sh [new file with mode: 0755]
tests/tc_video_alsa_webm_stream.sh [new file with mode: 0755]
tests/tc_video_dv_webm_stream.sh [new file with mode: 0755]
tests/tc_video_only_simple_webm.sh [new file with mode: 0755]
tests/tc_video_simple_file.sh [new file with mode: 0755]
tests/tc_video_simple_file_webm.sh [new file with mode: 0755]
tests/tc_video_simple_start.sh [new file with mode: 0755]
tests/tc_video_simple_webm_ice.sh [new file with mode: 0755]
tests/tc_video_simple_webm_stream.sh [new file with mode: 0755]
tests/tc_video_simple_webm_stream_hd.sh [new file with mode: 0755]
tests/tc_video_simple_webm_stream_hd_alsa_test.sh [new file with mode: 0755]
tests/tc_video_simple_webm_stream_hd_jack_test.sh [new file with mode: 0755]
tests/tc_video_simple_webm_stream_hd_test.sh [new file with mode: 0755]
tests/tc_video_simple_webm_stream_m.sh [new file with mode: 0755]
tests/tc_video_simple_webm_stream_sd_test.sh [new file with mode: 0755]
tests/tc_video_simple_webm_tee.sh [new file with mode: 0755]
tests/tcp2x.sh [new file with mode: 0755]
tests/video_player_qt.py [new file with mode: 0644]
tests/vumeter.py [new file with mode: 0644]
tests/x264_2.sh [new file with mode: 0755]
tests/x264_pl2.sh [new file with mode: 0755]
tests/x264_relay.sh [new file with mode: 0755]
tests/x264_relay_x.sh [new file with mode: 0755]
tests/x_jack_webm.sh [new file with mode: 0755]

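The file list above records a flattening of the old test trees (scripts/old, scripts/tests and scripts/tests/py under tcserver/conf/usr/local/share/telecaster) into a single top-level tests/ directory. The commands actually used are not part of the commit; the lines below are only a minimal shell sketch, assuming a standard working copy, of how such a move could be reproduced with git mv. Files whose names differ between the two lists (for example tests/rtpx264_2.sh) are not covered by it.

  # Hypothetical reconstruction of this restructuring; the paths come from the
  # file list above, everything else is an assumption for illustration only.
  OLD=tcserver/conf/usr/local/share/telecaster/scripts
  mkdir -p tests

  # Flatten the three old locations into tests/ (repeat for each script).
  git mv "$OLD/old/ffmpeg_dv_jack_fifo_shout"   tests/
  git mv "$OLD/tests/gst_dv_jack_shout"         tests/
  git mv "$OLD/tests/py/control_mixer_osc.py"   tests/

  git commit -m "mv tests"
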
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/old/ffmpeg_dv_jack_fifo_shout b/tcserver/conf/usr/local/share/telecaster/scripts/old/ffmpeg_dv_jack_fifo_shout
deleted file mode 100755 (executable)
index 07075a1..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-set -e
-
-case "$1" in
- start)
-   fifo=/tmp/video_fifo
-   if [ ! -e $fifo ]; then
-   mkfifo $fifo
-   fi
-
-   dir=/mnt/data1/video_tests
-   now=`date -R`
-
-   dvgrab -buffers 1 - | ffmpeg -f dv -i - -f jack -i ffmpeg -vcodec libtheora -s 480x320 -aspect 16:9 -acodec libvorbis -b 300k -f ogg -y $fifo -map 0.0 -map 1.0 &
-
-   sleep 5
-   jack_connect jack_rack:out_1 ffmpeg:input_1
-   jack_connect jack_rack:out_2 ffmpeg:input_2
-   #jack_connect jack_rack:out_1 ffmpeg:input_1
-   #jack_connect jack_rack:out_2 ffmpeg:input_2
-
-   sleep 1
-   cat $fifo | tee "$dir/Pre-Barreau_-_Augustins_-_Video_Live_-_$now.ogg" | oggfwd -d "pb_video_live" -g "Teaching"  -n "pb_video_live" localhost 8000 source2parisson /pb_video_live.ogg &
-   ;;
- stop)
-   jack_disconnect jack_rack:out_1 ffmpeg:input_1
-   jack_disconnect jack_rack:out_2 ffmpeg:input_2
-   pkill ffmpeg
-   ;;
-esac
-
-
-
-
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/old/ffmpeg_usb_jack_fifo_shout b/tcserver/conf/usr/local/share/telecaster/scripts/old/ffmpeg_usb_jack_fifo_shout
deleted file mode 100755 (executable)
index 47fe2ed..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-    set -e
-
-case "$1" in
- start)
-   fifo=/tmp/video_fifo2
-   if [ ! -e $fifo ]; then
-   mkfifo $fifo
-   fi
-
-   dir=$HOME/archives/2011
-   now=`date -R`
-   file=$dir/video_test2.ogg
-
-   ffmpeg -f video4linux2 -i /dev/video0 -f jack -i ffmpeg -itsoffset 00:00:00.8 -r 20 -f ogg -vcodec libtheora -s 320x240 -b 380k -acodec libvorbis -ab 64k -ar 44100 -ac 1 -y $file -map 0.0 -map 1.0 &
-
-   sleep 3
-   jack_connect jack_rack:out_1 ffmpeg:input_1
-   #jack_connect jack_rack:out_1 ffmpeg:input_2
-
-   sleep 2
-   cat $file | tee $file.ogg | oggfwd -d "TeleCaster Live Video Services" -g "Vocal"  -n "TeleCaster Live Video" localhost 8000 source2parisson /telecaster_live_video.ogg &
-   ;;
- stop)
-   jack_disconnect jack_rack:out_1 ffmpeg:input_1
-#   jack_disconnect jack_rack:out_1 ffmpeg:input_2
-   pkill -9 ffmpeg
-   ;;
-esac
-
-
-
-
-
-# http://www.kkoncepts.net/node/69
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_dv_jack_shout b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_dv_jack_shout
deleted file mode 100755 (executable)
index ef59dfc..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-gst-launch-0.10 dv1394src ! queue ! dvdemux name=d ! queue ! dvdec  \
-       ! queue ! videoscale ! video/x-raw-yuv, width=480, height=368 \
-       ! queue ! ffmpegcolorspace ! theoraenc bitrate=500 ! muxout. \
-        oggmux name=muxout \
-       ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_osc_multi.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_osc_multi.py
deleted file mode 100755 (executable)
index d47e17f..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-from threading import Thread
-
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def add_method(self, path, type, method):
-        self.server.add_method(path, type, method)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-
-class GSTSrcVideo(object):
-
-    def __init__(self, pipe=None, mime_type='video/x-raw-yuv', framerate='24/1',
-                 width=160, height=90, xpos=0, ypos=0):
-        self.mime_type = mime_type
-        self.framerate = framerate
-        self.width = width
-        self.height = height
-        self.xpos = xpos
-        self.ypos = ypos
-        if not pipe:
-            pipe = 'videotestsrc pattern="snow"'
-        self.pipe = pipe + ' ! %s, framerate=%s, width=%s, height=%s' \
-                        % (self.mime_type, self.framerate, str(self.width), str(self.height))
-
-
-class GSTSrcAudio(object):
-
-    def __init__(self, pipe=None, mime_type='audio/x-raw-float', channels=2):
-        self.mime_type = mime_type
-        self.channels = channels
-        if not pipe:
-            pipe = 'jackaudiosrc connect=2'
-        self.pipe = pipe + ' ! %s, channels=%s' % (self.mime_type, str(self.channels))
-
-
-class V4lControl(object):
-
-    def __init__(self, device=0):
-        self.program = 'v4l2-ctl'
-        self.device = device
-
-    def execute(self, args):
-        command = ' '.join([self.program, '-d', self.device, '-c', args])
-        os.system(command)
-
-    def power_line_frequency(self, value):
-          arg = 'power_line_frequency=' + value
-          self.execute(arg)
-
-
-class GSTWebmStreamer(object):
-
-    def __init__(self, host='127.0.0.1', port=9000, blocksize=65536):
-        self.host = host
-        self.port = port
-        self.blocksize = blocksize
-        self.muxer = """webmmux streamable=true name=muxer \
-                    ! queue ! tcpserversink host=%s port=%s protocol=none blocksize=%s sync-method=1
-                    """ % (self.host, str(self.port), str(self.blocksize))
-
-    def video_setup(self, threads=4, quality=10):
-        self.video = """! queue ! ffmpegcolorspace ! queue ! vp8enc speed=2 threads=%s quality=%s \
-                        max-latency=25 max-keyframe-distance=96 auto-alt-ref-frames=true  \
-                        ! queue ! muxer.""" % (str(threads), str(quality))
-
-    def audio_setup(self, quality=0.3):
-        self.audio = "! queue ! audioconvert ! queue ! vorbisenc quality=%s ! queue ! muxer." % str(self.quality)
-
-    @property
-    def pipe(self):
-        return ' '.join([self.video, self.audio, self.muxer])
-
-
-class GSTMixer(object):
-
-    def __init__(self, osc_port=8338):
-        self.name = 'mixer'
-        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
-        self.srcs = []
-        self.i= 0
-        self.osc_port = osc_port
-        self.osc = OSCController(self.osc_port)
-
-    def osc_callback(self, path, value):
-        paths = path.split('/')
-        sink = paths[1]
-        param = paths[2]
-        for src in self.srcs:
-            if src['sink'] == sink:
-                break
-        src['control'].set(param, 5 * gst.SECOND, value[0])
-
-    def osc_alpha_callback(self, path, value):
-        paths = path.split('/')
-        layer = paths[1]
-        param = paths[2]
-        id = int(param[-1])-1
-        for src in self.srcs:
-            if src['id'] == id:
-                break
-        src['control'].set('alpha', 5 * gst.SECOND, value[0])
-
-    def osc_xy_callback(self, path, value):
-        for src in self.srcs:
-            if src['id'] == 2:
-                break
-        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
-        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
-
-    def add_src(self, src):
-        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
-        self.i += 1
-
-    def setup(self):
-        self.srcs.reverse()
-
-        for src in self.srcs:
-            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
-
-        print ' '.join(self.pipe)
-        self.process = gst.parse_launch(' '.join(self.pipe))
-        mixer = self.process.get_by_name("mixer")
-
-        for src in self.srcs:
-            src['pad'] = mixer.get_pad(src['sink'])
-            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
-
-            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
-
-            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
-
-            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
-
-            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
-
-        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
-
-    def run(self):
-        self.osc.start()
-        self.process.set_state(gst.STATE_PLAYING)
-        gobject.MainLoop().run()
-
-
-if __name__ == '__main__':
-    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
-    src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
-    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
-    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
-    mixer = GSTMixer()
-    mixer.add_src(src1)
-    mixer.add_src(src2)
-    mixer.add_src(src3)
-    mixer.add_src(src4)
-    mixer.setup()
-    mixer.run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_double_shout b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_double_shout
deleted file mode 100755 (executable)
index 246d01e..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-gst-launch v4l2src device=/dev/video0 ! videoscale ! video/x-raw-yuv, width=160, height=120 ! videomixer name=mix sink_1::xpos=20 sink_1::ypos=20 sink_1::alpha=0.9  ! queue ! ffmpegcolorspace ! theoraenc quality=40 ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! tee name=t ! queue ! filesink location="video_test.ogg" t. ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 v4l2src device=/dev/video1 ! videoscale ! video/x-raw-yuv, width=480, height=270 ! mix.
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_double_shout2.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_double_shout2.sh
deleted file mode 100755 (executable)
index cd25b51..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=480
-HEIGHT=270
-
-gst-launch v4l2src device=/dev/video0 ! queue ! videoscale ! video/x-raw-yuv, width=160, height=120 \
-       ! queue ! videorate ! video/x-raw-yuv,framerate=25/1 \
-       ! queue ! videomixer name=mix sink_1::xpos=0 sink_1::ypos=0 sink_1::alpha=0.9 \
-       ! queue ! ffmpegcolorspace ! queue ! theoraenc quality=25 ! muxout. \
-       jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 \
-       ! queue ! audioconvert ! vorbisenc ! queue ! muxout.  \
-       oggmux name=muxout ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
-       v4l2src device=/dev/video1 ! queue ! videoscale ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT \
-       ! queue ! videorate ! video/x-raw-yuv,framerate=25/1 ! mix. \
-       > /dev/null &
-               
-sleep 2
-
-jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
-jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_jack_fifo_shout b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_jack_fifo_shout
deleted file mode 100755 (executable)
index 1c19e27..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -e
-
-case "$1" in
- start)
-   dir=/home/$USER/trash
-   dat=`date '+%y-%m-%d-%H:%M:%S'`
-   file=$dir/video_test_$dat.ogg
-
-   gst-launch-0.10 v4l2src device=/dev/video0 ! queue ! videorate ! video/x-raw-yuv,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! filesink location=$file sync=true &
-   
-#    gst-launch v4l2src ! queue ! videorate ! video/x-raw-yuv,fps=30,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 &
-   
-#    
-    sleep 10
-    cat $file | oggfwd -d "TeleCaster Live Video Services" -g "Vocal"  -n "TeleCaster Live Video" localhost 8000 source2parisson /telecaster_live_video.ogg &
-   ;;
- stop)
-   pkill -9 oggfwd
-   pkill -9 gst-launch-0.10
-   ;;
-esac
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_jack_shout b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_jack_shout
deleted file mode 100755 (executable)
index f5d25ba..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-set -e
-
-case "$1" in
- start)
-   dir=/home/$USER/trash
-   dat=`date '+%y-%m-%d-%H:%M:%S'`
-   file=$dir/video_test_$dat.ogg
-
-#   gst-launch-0.10 v4l2src device=/dev/video0 ! queue ! videorate ! video/x-raw-yuv,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! filesink location=$file sync=true &
-   
-    gst-launch-0.10 v4l2src ! queue ! videorate ! video/x-raw-yuv,fps=30,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 &
-   
-#    
-#    sleep 10
-#    cat $file | oggfwd -d "TeleCaster Live Video Services" -g "Vocal"  -n "TeleCaster Live Video" localhost 8000 source2parisson /telecaster_live_video.ogg &
-   ;;
- stop)
-   pkill -9 oggfwd
-   pkill -9 gst-launch-0.10
-   ;;
-esac
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_simple_ogg b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_simple_ogg
deleted file mode 100755 (executable)
index af63af4..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv,width=640, height=480 \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! theoraenc quality=20 ! mux. \
-       alsasrc device=hw:0,0 ! audio/x-raw-int,rate=44100,channels=2,depth=16 \
-       !  queue ! audioconvert ! audio/x-raw-float,rate=44100,channels=2,depth=16 ! queue ! vorbisenc ! mux. \
-       oggmux name=mux ! filesink location=/var/www/test/test.ogg \
-
-# ! queue ! videorate ! video/x-raw-yuv, framerate=25/1 \
-#jackaudiosrc connect=1 ! queue ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_simple_ogg_jack b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_simple_ogg_jack
deleted file mode 100755 (executable)
index 69d2091..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv,width=640, height=480 \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! theoraenc quality=25 ! mux. \
-       jackaudiosrc connect=1 ! queue ! audioconvert ! queue ! vorbisenc ! mux. \
-       oggmux name=mux ! filesink location=/var/www/test/test.ogg \
-
-# ! queue ! videorate ! video/x-raw-yuv, framerate=25/1 \
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_triple_shout b/tcserver/conf/usr/local/share/telecaster/scripts/tests/gst_video_triple_shout
deleted file mode 100755 (executable)
index d6aeec8..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-gst-launch v4l2src device=/dev/video0 ! queue ! videoscale ! video/x-raw-yuv, width=160, height=120 \
-       ! queue ! ffmpegcolorspace ! video/x-raw-rgb \
-       ! queue ! videomixer name=mix \
-               sink_1::xpos=40 sink_1::ypos=60 sink_1::alpha=0.9 sink_2::xpos=40 sink_2::ypos=180 sink_2::alpha=0.9 \
-       ! queue ! videoscale ! video/x-raw-rgb, width=480, height=270 \
-       ! queue ! ffmpegcolorspace ! theoraenc quality=10 ! oggmux name=muxout \
-       ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
-       ximagesrc ! queue ! videorate ! video/x-raw-rgb, framerate=30/1 ! videoscale ! video/x-raw-rgb, width=160, height=120 ! mix. \
-    v4l2src device=/dev/video1 ! queue ! ffmpegcolorspace ! video/x-raw-rgb ! videoscale ! video/x-raw-rgb, width=640, height=360 ! mix. \
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/osc_record_start.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/osc_record_start.py
deleted file mode 100755 (executable)
index 12356a4..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import liblo, sys
-
-port = int(sys.argv[-1])
-
-# send all messages to port 1234 on the local machine
-try:
-    target = liblo.Address(port)
-except liblo.AddressError, err:
-    print str(err)
-    sys.exit()
-
-# send message "/foo/message1" with int, float and string arguments
-liblo.send(target, "/record", 1)
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/osc_record_stop.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/osc_record_stop.py
deleted file mode 100755 (executable)
index 81da1a1..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import liblo, sys
-
-port = int(sys.argv[-1])
-
-# send all messages to port 1234 on the local machine
-try:
-    target = liblo.Address(port)
-except liblo.AddressError, err:
-    print str(err)
-    sys.exit()
-
-# send message "/foo/message1" with int, float and string arguments
-liblo.send(target, "/record", 0)
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_player_osc.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_player_osc.py
deleted file mode 100644 (file)
index 609882c..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-
-import gobject
-gobject.threads_init()
-import pygst
-pygst.require("0.10")
-import gst
-from threading import Thread
-import sys
-import liblo
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-            
-class AudioPlayer(Thread):
-    
-    def __init__(self, uri):
-        Thread.__init__(self)
-        self.uri = uri
-        self.controller = OSCController(12345)
-        self.controller.server.add_method('/play', 'i', self.play_stop_cb)
-        self.controller.start()
-        
-        self.mainloop = gobject.MainLoop()
-        self.player = gst.element_factory_make("playbin", "player")
-        self.player.set_property('uri', self.uri)
-        
-    def play_stop_cb(self, path, value):
-        value = value[0]
-        if value:
-            print 'play'
-            self.player.set_state(gst.STATE_NULL)
-            self.player.set_state(gst.STATE_PLAYING)
-        else:
-            print 'stop'
-            self.player.set_state(gst.STATE_NULL)
-            
-    def run(self):
-        self.mainloop.run()
-    
-if __name__ == '__main__':
-    path = sys.argv[-1]
-    player = AudioPlayer(path)
-    player.start()
-    
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_player_qt.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_player_qt.py
deleted file mode 100644 (file)
index ed7da6e..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import sys, os
-from PyQt4 import QtCore, QtGui, uic
-from PyQt4.phonon import Phonon
-
-class AudioPlayer(QtGui.QWidget):
-    def __init__(self, url, parent = None):
-
-        self.url = url
-
-        QtGui.QWidget.__init__(self, parent)
-        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
-            QtGui.QSizePolicy.Preferred)
-
-
-        self.player = Phonon.createPlayer(Phonon.MusicCategory,
-            Phonon.MediaSource(url))
-        self.player.setTickInterval(100)
-        self.player.tick.connect(self.tock)
-
-        self.play_pause = QtGui.QPushButton(self)
-        self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
-        self.play_pause.clicked.connect(self.playClicked)
-        self.player.stateChanged.connect(self.stateChanged)
-
-        self.slider = Phonon.SeekSlider(self.player , self)
-
-        self.status = QtGui.QLabel(self)
-        self.status.setAlignment(QtCore.Qt.AlignRight |
-            QtCore.Qt.AlignVCenter)
-
-        self.download = QtGui.QPushButton("Download", self)
-        self.download.clicked.connect(self.fetch)
-
-        layout = QtGui.QHBoxLayout(self)
-        layout.addWidget(self.play_pause)
-        layout.addWidget(self.slider)
-        layout.addWidget(self.status)
-        layout.addWidget(self.download)
-
-    def playClicked(self):
-        if self.player.state() == Phonon.PlayingState:
-            self.player.pause()
-        else:
-            self.player.play()
-
-    def stateChanged(self, new, old):
-        if new == Phonon.PlayingState:
-            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_pause.svg'))
-        else:
-            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
-
-    def tock(self, time):
-        time = time/1000
-        h = time/3600
-        m = (time-3600*h) / 60
-        s = (time-3600*h-m*60)
-        self.status.setText('%02d:%02d:%02d'%(h,m,s))
-
-    def fetch(self):
-        print 'Should download %s'%self.url
-
-def main():
-    app = QtGui.QApplication(sys.argv)
-    window=AudioPlayer(sys.argv[1])
-    window.show()
-    # It's exec_ because exec is a reserved word in Python
-    sys.exit(app.exec_())
-
-if __name__ == "__main__":
-    main()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_video.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_video.py
deleted file mode 100644 (file)
index bf17145..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-
-"""A short Audio-Video example"""
-import gobject
-gobject.threads_init()
-import gst
-import pygtk
-pygtk.require("2.0")
-import gtk
-gtk.gdk.threads_init()
-import sys
-import os
-from demo import Demo
-
-def create_decodebin():
-    try:
-        return gst.element_factory_make("decodebin2")
-    except:
-        return gst.element_factory_make("decodebin")
-
-class DemoException(Exception):
-    """Base exception class for errors which occur during demos"""
-
-    def __init__(self, reason):
-        self.reason = reason
-
-class AVDemo(Demo):
-    """Extends base demo with both audio and video sinks
-    * a window containing a drawing area and basic media controls
-    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
-    * connects the ximagesink to the window's drawing area
-
-    Derived classes need only override magic(), __name__,
-    and __usage__ to create new demos."""
-
-    __name__ = "AV Demo"
-    __usage__ = "python audio_video.py <filename>"
-    __def_win_size__ = (320, 240)
-
-    # this commment allows us to include only a portion of the file
-    # in the tutorial for this demo
-
-    def magic(self, pipeline, (videosink, audiosink), args):
-        """This is where the magic happens"""
-
-        def onPadAdded(source, pad):
-            # first we see if we can link to the videosink
-            tpad = videoqueue.get_compatible_pad(pad)
-            if tpad:
-                pad.link(tpad)
-                return
-            # if not, we try the audio sink
-            tpad = audioqueue.get_compatible_pad(pad)
-            if tpad:
-                pad.link(tpad)
-                return
-
-        src = gst.element_factory_make("filesrc", "src")
-        src.props.location = args[0]
-        dcd = create_decodebin()
-        audioqueue = gst.element_factory_make("queue")
-        videoqueue = gst.element_factory_make("queue")
-        pipeline.add(src, dcd, audioqueue, videoqueue)
-
-        src.link(dcd)
-        videoqueue.link(videosink)
-        audioqueue.link(audiosink)
-        dcd.connect("pad-added", onPadAdded)
-
-    def createPipeline(self, w):
-        """Given a window, creates a pipeline and connects it to the window"""
-
-        # code will make the ximagesink output in the specified window
-        def set_xid(window):
-            gtk.gdk.threads_enter()
-            videosink.set_xwindow_id(window.window.xid)
-            videosink.expose()
-            gtk.gdk.threads_leave()
-
-        # this code receives the messages from the pipeline. if we
-        # need to set X11 id, then we call set_xid
-        def bus_handler(unused_bus, message):
-            if message.type == gst.MESSAGE_ELEMENT:
-                if message.structure.get_name() == 'prepare-xwindow-id':
-                    set_xid(w)
-            return gst.BUS_PASS
-
-        # create our pipeline, and connect our bus_handler
-        self.pipeline = gst.Pipeline()
-        bus = self.pipeline.get_bus()
-        bus.set_sync_handler(bus_handler)
-
-        videosink = gst.element_factory_make("ximagesink", "sink")
-        videosink.set_property("force-aspect-ratio", True)
-        videosink.set_property("handle-expose", True)
-        scale = gst.element_factory_make("videoscale", "scale")
-        cspace = gst.element_factory_make("ffmpegcolorspace", "cspace")
-
-        audiosink = gst.element_factory_make("autoaudiosink")
-        audioconvert = gst.element_factory_make("audioconvert")
-
-        # pipeline looks like: ... ! cspace ! scale ! sink
-        #                      ... ! audioconvert ! autoaudiosink
-        self.pipeline.add(cspace, scale, videosink, audiosink,
-            audioconvert)
-        scale.link(videosink)
-        cspace.link(scale)
-        audioconvert.link(audiosink)
-        return (self.pipeline, (cspace, audioconvert))
-
-# if this file is being run directly, create the demo and run it
-if __name__ == '__main__':
-    AVDemo().run()
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_video_crossfade.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/audio_video_crossfade.py
deleted file mode 100644 (file)
index d046ca1..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python
-
-"""A short Audio-Video example"""
-import gobject
-gobject.threads_init()
-import gst
-import pygtk
-pygtk.require("2.0")
-import gtk
-import sys
-import os
-from audio_video import AVDemo, create_decodebin
-
-class AVCrossfade(AVDemo):
-    """Base class implementing boring, boiler-plate code.
-    Sets up a basic gstreamer environment which includes:
-
-    * a window containing a drawing area and basic media controls
-    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
-    * connects the ximagesink to the window's drawing area
-
-    Derived classes need only override magic(), __name__,
-    and __usage__ to create new demos."""
-
-    __name__ = "AV Demo"
-    __usage__ = "python audio_video.py <filename>"
-    __def_win_size__ = (320, 240)
-
-    # this commment allows us to include only a portion of the file
-    # in the tutorial for this demo
-
-    def onPad(self, decoder, pad, target):
-        tpad = target.get_compatible_pad(pad)
-        if tpad:
-            pad.link(tpad)
-
-    def addVideoChain(self, pipeline, name, decoder, mixer):
-        alpha = gst.element_factory_make("alpha")
-        alpha.props.alpha = 1.0
-        videoscale = gst.element_factory_make("videoscale")
-        videorate = gst.element_factory_make("videorate")
-        colorspace = gst.element_factory_make("ffmpegcolorspace")
-        queue = gst.element_factory_make("queue")
-
-        pipeline.add(alpha, videoscale, videorate, colorspace, queue)
-        decoder.connect("pad-added", self.onPad, videorate)
-        videorate.link(videoscale)
-        videoscale.link(colorspace)
-        colorspace.link(queue)
-        queue.link(alpha)
-        alpha.link(mixer)
-
-        setattr(self, "alpha%s" % name, alpha)
-
-    def addAudioChain(self, pipeline, name, decoder, adder):
-        volume = gst.element_factory_make("volume")
-        volume.props.volume = 0.5
-        audioconvert = gst.element_factory_make("audioconvert")
-        audiorate = gst.element_factory_make("audioresample")
-        queue = gst.element_factory_make("queue")
-
-        pipeline.add(volume, audioconvert, audiorate, queue)
-        decoder.connect("pad-added", self.onPad, audioconvert)
-        audioconvert.link(audiorate)
-        audiorate.link(queue)
-        queue.link(volume)
-        volume.link(adder)
-
-        setattr(self, "vol%s" % name, volume)
-
-    def addSourceChain(self, pipeline, name, filename, mixer, adder):
-        src = gst.element_factory_make("filesrc")
-        src.props.location = filename
-        dcd = create_decodebin()
-
-        pipeline.add(src, dcd)
-        src.link(dcd)
-        self.addVideoChain(pipeline, name, dcd, mixer)
-        self.addAudioChain(pipeline, name, dcd, adder)
-
-    def magic(self, pipeline, (videosink, audiosink), args):
-        """This is where the magic happens"""
-        mixer = gst.element_factory_make("videomixer")
-        adder = gst.element_factory_make("adder")
-        pipeline.add(mixer, adder)
-
-        mixer.link(videosink)
-        adder.link(audiosink)
-        self.addSourceChain(pipeline, "A", args[0], mixer, adder)
-        self.addSourceChain(pipeline, "B", args[1], mixer, adder)
-        self.alphaB.props.alpha = 0.5
-
-    def onValueChanged(self, adjustment):
-        balance = self.balance.get_value()
-        crossfade = self.crossfade.get_value()
-        self.volA.props.volume = (2 - balance) * (1 - crossfade)
-        self.volB.props.volume = balance * crossfade
-        self.alphaB.props.alpha = crossfade
-
-    def customWidgets(self):
-        self.crossfade = gtk.Adjustment(0.5, 0, 1.0)
-        self.balance = gtk.Adjustment(1.0, 0.0, 2.0)
-        crossfadeslider = gtk.HScale(self.crossfade)
-        balanceslider = gtk.HScale(self.balance)
-        self.crossfade.connect("value-changed", self.onValueChanged)
-        self.balance.connect("value-changed", self.onValueChanged)
-
-        ret = gtk.Table()
-        ret.attach(gtk.Label("Crossfade"), 0, 1, 0, 1)
-        ret.attach(crossfadeslider, 1, 2, 0, 1)
-        ret.attach(gtk.Label("Balance"), 0, 1, 1, 2)
-        ret.attach(balanceslider, 1, 2, 1, 2)
-        return ret
-
-# if this file is being run directly, create the demo and run it
-if __name__ == '__main__':
-    AVCrossfade().run()
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer.py
deleted file mode 100644 (file)
index d0fcb9e..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-
-p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
-      videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
-      videomixer name=mix1 ! mix0.sink_1
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix1.sink_0
-      videomixer name=mix2 ! mix1.sink_1
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_0
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_1
-""")
-
-m1 = p.get_by_name ("mix1")
-s1_0 = m1.get_pad ("sink_0")
-s1_0.set_property ("xpos", 100)
-s1_1 = m1.get_pad ("sink_1")
-s1_1.set_property ("xpos", 250)
-
-m2 = p.get_by_name ("mix2")
-s2_0 = m2.get_pad ("sink_0")
-s2_0.set_property ("xpos", 200)
-s2_1 = m2.get_pad ("sink_1")
-s2_1.set_property ("xpos", 250)
-
-c1_0 = gst.Controller(s1_0, "ypos", "alpha")
-c1_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c1_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c1_0.set("ypos", 0, 0)
-c1_0.set("ypos", 5 * gst.SECOND, 200)
-c1_0.set("alpha", 0, 0)
-c1_0.set("alpha", 5 * gst.SECOND, 1.0)
-
-c1_1 = gst.Controller(s1_1, "ypos", "alpha")
-c1_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c1_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c1_1.set("ypos", 0, 0)
-c1_1.set("ypos", 5 * gst.SECOND, 200)
-c1_1.set("alpha", 0, 0)
-c1_1.set("alpha", 5 * gst.SECOND, 1.0)
-
-c2_0 = gst.Controller(s2_0, "ypos", "alpha")
-c2_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c2_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c2_0.set("ypos", 0, 0)
-c2_0.set("ypos", 5 * gst.SECOND, 200)
-c2_0.set("alpha", 0, 0)
-c2_0.set("alpha", 5 * gst.SECOND, 1.0)
-
-c2_1 = gst.Controller(s2_1, "ypos", "alpha")
-c2_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c2_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c2_1.set("ypos", 0, 0)
-c2_1.set("ypos", 5 * gst.SECOND, 200)
-c2_1.set("alpha", 0, 0)
-c2_1.set("alpha", 5 * gst.SECOND, 1.0)
-
-p.set_state (gst.STATE_PLAYING)
-
-gobject.MainLoop().run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc.py
deleted file mode 100644 (file)
index 379985e..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-from threading import Thread
-
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def add_method(self, path, type, method):
-        self.server.add_method(path, type, method)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-
-class GSTSrcVideo(object):
-
-    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
-        self.framerate = framerate
-        self.width = width
-        self.height = height
-        self.xpos = xpos
-        self.ypos = ypos
-        if not pipe:
-            pipe = 'videotestsrc pattern="snow"'
-        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
-                        % (self.framerate, str(self.width), str(self.height))
-
-class GSTMixer(object):
-
-    def __init__(self, osc_port=13000):
-        self.name = 'mixer'
-        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
-        self.srcs = []
-        self.i= 0
-
-        self.osc_port = osc_port
-        self.osc = OSCController(self.osc_port)
-
-    def osc_callback(self, path, value):
-        paths = path.split('/')
-        sink = paths[1]
-        param = paths[2]
-        for src in self.srcs:
-            if src['sink'] == sink:
-                break
-        src['control'].set(param, 5 * gst.SECOND, value[0])
-
-    def add_src(self, src):
-        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
-        self.i += 1
-
-    def setup(self):
-        self.srcs.reverse()
-
-        for src in self.srcs:
-            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
-
-        print ' '.join(self.pipe)
-        self.process = gst.parse_launch(' '.join(self.pipe))
-        mixer = self.process.get_by_name("mixer")
-
-        for src in self.srcs:
-            src['pad'] = mixer.get_pad(src['sink'])
-            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
-
-            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
-            self.osc.add_method('/'+src['sink']+'/xpos', 'i', self.osc_callback)
-
-            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
-            self.osc.add_method('/'+src['sink']+'/ypos', 'i', self.osc_callback)
-
-            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
-            self.osc.add_method('/'+src['sink']+'/alpha', 'f', self.osc_callback)
-
-
-    def run(self):
-        self.osc.start()
-        self.process.set_state(gst.STATE_PLAYING)
-        gobject.MainLoop().run()
-
-
-if __name__ == '__main__':
-    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc')
-    src2 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=100, ypos=50)
-    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
-    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
-    mixer = GSTMixer()
-    mixer.add_src(src1)
-    mixer.add_src(src2)
-    mixer.add_src(src3)
-    mixer.add_src(src4)
-    mixer.setup()
-    mixer.run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch.py
deleted file mode 100644 (file)
index 8c709d1..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-from threading import Thread
-
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def add_method(self, path, type, method):
-        self.server.add_method(path, type, method)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-
-class GSTSrcVideo(object):
-
-    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
-        self.framerate = framerate
-        self.width = width
-        self.height = height
-        self.xpos = xpos
-        self.ypos = ypos
-        if not pipe:
-            pipe = 'videotestsrc pattern="snow"'
-        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
-                        % (self.framerate, str(self.width), str(self.height))
-
-class GSTMixer(object):
-
-    def __init__(self, osc_port=8338):
-        self.name = 'mixer'
-        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
-        self.srcs = []
-        self.i= 0
-
-        self.osc_port = osc_port
-        self.osc = OSCController(self.osc_port)
-
-    def osc_callback(self, path, value):
-        paths = path.split('/')
-        sink = paths[1]
-        param = paths[2]
-        for src in self.srcs:
-            if src['sink'] == sink:
-                break
-        src['control'].set(param, 5 * gst.SECOND, value[0])
-
-    def osc_alpha_callback(self, path, value):
-        paths = path.split('/')
-        layer = paths[1]
-        param = paths[2]
-        id = int(param[-1])-1
-        for src in self.srcs:
-            if src['id'] == id:
-                break
-        src['control'].set('alpha', 5 * gst.SECOND, value[0])
-
-    def osc_xy_callback(self, path, value):
-        for src in self.srcs:
-            if src['id'] == 2:
-                break
-        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
-        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
-
-    def add_src(self, src):
-        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
-        self.i += 1
-
-    def setup(self):
-        self.srcs.reverse()
-
-        for src in self.srcs:
-            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
-
-        print ' '.join(self.pipe)
-        self.process = gst.parse_launch(' '.join(self.pipe))
-        mixer = self.process.get_by_name("mixer")
-
-        for src in self.srcs:
-            src['pad'] = mixer.get_pad(src['sink'])
-            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
-
-            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
-
-            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
-
-            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
-
-            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
-
-        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
-
-    def run(self):
-        self.osc.start()
-        self.process.set_state(gst.STATE_PLAYING)
-        gobject.MainLoop().run()
-
-
-if __name__ == '__main__':
-    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
-    src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
-    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
-    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
-    mixer = GSTMixer()
-    mixer.add_src(src1)
-    mixer.add_src(src2)
-    mixer.add_src(src3)
-    mixer.add_src(src4)
-    mixer.setup()
-    mixer.run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_1cam.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_1cam.py
deleted file mode 100644 (file)
index 0bfcaa6..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-from threading import Thread
-
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def add_method(self, path, type, method):
-        self.server.add_method(path, type, method)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-
-class GSTSrcVideo(object):
-
-    def __init__(self, pipe=None, framerate='{30/1}', width=160, height=90, xpos=0, ypos=0):
-        self.framerate = framerate
-        self.width = width
-        self.height = height
-        self.xpos = xpos
-        self.ypos = ypos
-        if not pipe:
-            pipe = 'videotestsrc pattern="snow"'
-        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
-                        % (self.framerate, str(self.width), str(self.height))
-
-
-class GSTWebmHttpStreamer(object):
-
-    def __init__(self, protocol='tcp', port=9000):
-        self.protocol = protocol
-        self.port = port
-
-
-class GSTMixer(object):
-
-    def __init__(self, osc_port=8338):
-        self.name = 'mixer'
-        self.pipe = ['videomixer name=mixer ! queue ! ffmpegcolorspace ! xvimagesink sync=false']
-        self.srcs = []
-        self.i= 0
-
-        self.osc_port = osc_port
-        self.osc = OSCController(self.osc_port)
-
-    def osc_callback(self, path, value):
-        paths = path.split('/')
-        sink = paths[1]
-        param = paths[2]
-        for src in self.srcs:
-            if src['sink'] == sink:
-                break
-        src['control'].set(param, 5 * gst.SECOND, value[0])
-
-    def osc_alpha_callback(self, path, value):
-        paths = path.split('/')
-        layer = paths[1]
-        param = paths[2]
-        id = int(param[-1])-1
-        for src in self.srcs:
-            if src['id'] == id:
-                break
-        src['control'].set('alpha', 5 * gst.SECOND, value[0])
-
-    def osc_xy_callback(self, path, value):
-        for src in self.srcs:
-            if src['id'] == 2:
-                break
-        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
-        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
-
-    def add_src(self, src):
-        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
-        self.i += 1
-
-    def setup(self):
-        self.srcs.reverse()
-
-        for src in self.srcs:
-            self.pipe.append(' '.join([src['src'].pipe, '! queue ! ' + self.name + '.' + src['sink']]))
-
-        print ' '.join(self.pipe)
-        self.process = gst.parse_launch(' '.join(self.pipe))
-        mixer = self.process.get_by_name("mixer")
-
-        for src in self.srcs:
-            src['pad'] = mixer.get_pad(src['sink'])
-            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
-
-            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
-
-            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
-
-            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
-
-            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
-
-        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
-
-    def run(self):
-        self.osc.start()
-        self.process.set_state(gst.STATE_PLAYING)
-        gobject.MainLoop().run()
-
-
-if __name__ == '__main__':
-    src1 = GSTSrcVideo(width=800, height=600, pipe='videotestsrc pattern="black"')
-    src2 = GSTSrcVideo(width=800, height=600, pipe='videotestsrc ')
-    src3 = GSTSrcVideo(width=640, height=480, xpos=200, ypos=150, pipe='v4l2src device=/dev/video0')
-    src4 = GSTSrcVideo(width=160, height=90, xpos=300, ypos=250)
-    mixer = GSTMixer()
-    mixer.add_src(src1)
-    mixer.add_src(src2)
-    mixer.add_src(src3)
-    mixer.add_src(src4)
-    mixer.setup()
-    mixer.run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_2cam.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_2cam.py
deleted file mode 100644 (file)
index 56cbaff..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-from threading import Thread
-
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def add_method(self, path, type, method):
-        self.server.add_method(path, type, method)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-
-class GSTSrcVideo(object):
-
-    def __init__(self, pipe=None, framerate='{30/1}', width=160, height=90, xpos=0, ypos=0, queue_option=''):
-        self.framerate = framerate
-        self.width = width
-        self.height = height
-        self.xpos = xpos
-        self.ypos = ypos
-        self.queue_option = queue_option
-
-        if not pipe:
-            pipe = 'videotestsrc pattern="snow"'
-        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
-                        % (self.framerate, str(self.width), str(self.height))
-
-class GSTWebmHttpEncoder(object):
-
-    def __init__(self, protocol='none', host='127.0.0.1', port=9000):
-        self.protocol = protocol
-        self.host = host
-        self.port = port
-        self.pipe = '! queue ! vp8enc speed=2 threads=4 quality=10.0 max-latency=25 max-keyframe-distance=30 auto-alt-ref-frames=true  ! queue ! muxout. \
-                    jackaudiosrc connect=2 ! audio/x-raw-float, channels=2 \
-                    ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
-                    webmmux streamable=true name=muxout \
-                    ! queue ! tcpserversink protocol=%s host=%s port=%s blocksize=65536 sync-method=1' \
-                    % (self.protocol, self.host, self.port)
-
-
-class GSTMixer(object):
-
-    def __init__(self, osc_port=8338, stream_port=9000):
-        self.name = 'mixer'
-        self.pipe = ['videomixer name=mixer ! queue ! ffmpegcolorspace ! tee name=monitor ! queue ! xvimagesink sync=false']
-        self.srcs = []
-        self.i= 0
-
-        self.osc_port = osc_port
-        self.stream_port = stream_port
-        self.osc = OSCController(self.osc_port)
-        self.encoder = GSTWebmHttpEncoder(port=self.stream_port)
-
-    def osc_callback(self, path, value):
-        paths = path.split('/')
-        sink = paths[1]
-        param = paths[2]
-        for src in self.srcs:
-            if src['sink'] == sink:
-                break
-        src['control'].set(param, 5 * gst.SECOND, value[0])
-
-    def osc_alpha_callback(self, path, value):
-        paths = path.split('/')
-        layer = paths[1]
-        param = paths[2]
-        id = int(param[-1])-1
-        for src in self.srcs:
-            if src['id'] == id:
-                break
-        src['control'].set('alpha', 5 * gst.SECOND, value[0])
-
-    def osc_xy_callback(self, path, value):
-        for src in self.srcs:
-            if src['id'] == 2:
-                break
-        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
-        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
-
-    def add_src(self, src):
-        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
-        self.i += 1
-
-    def setup(self):
-        self.srcs.reverse()
-
-        for src in self.srcs:
-            queue = 'queue'
-            if src['src'].queue_option:
-                # queue = 'timeoverlay ! queue'
-                queue += ' ' + src['src'].queue_option
-            self.pipe.append(' '.join([src['src'].pipe, '! ' + queue +  ' ! ' + self.name + '.' + src['sink']]))
-
-        # feed the mixer output to the WebM/TCP encoder branch through the monitor tee
-        self.pipe.append('monitor. ' + self.encoder.pipe)
-
-        print ' '.join(self.pipe)
-        self.process = gst.parse_launch(' '.join(self.pipe))
-        mixer = self.process.get_by_name("mixer")
-
-        for src in self.srcs:
-            src['pad'] = mixer.get_pad(src['sink'])
-            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
-
-            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
-
-            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
-
-            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
-
-            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
-
-        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
-
-    def run(self):
-        self.osc.start()
-        self.process.set_state(gst.STATE_PLAYING)
-        gobject.MainLoop().run()
-
-
-if __name__ == '__main__':
-    src1 = GSTSrcVideo(width=640, height=480, pipe='videotestsrc pattern="black"')
-    src4 = GSTSrcVideo(width=640, height=480, pipe='videotestsrc ')
-    src3 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='v4l2src device=/dev/video0 do-timestamp=true', queue_option='leaky=upstream min-threshold-time=10000000000')
-    src2 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='v4l2src device=/dev/video1 do-timestamp=true', queue_option='leaky=upstream min-threshold-time=10000000000')
-#    src2 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='souphttpsrc location=http://192.168.0.15:8080/videofeed do-timestamp=true ! jpegdec ! queue ! ffmpegcolorspace ! videorate')
-    mixer = GSTMixer()
-    mixer.add_src(src1)
-    mixer.add_src(src2)
-    mixer.add_src(src3)
-    mixer.add_src(src4)
-    mixer.setup()
-    mixer.run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_3cams.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_osc_touch_3cams.py
deleted file mode 100644 (file)
index ab60d18..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-from threading import Thread
-
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def add_method(self, path, type, method):
-        self.server.add_method(path, type, method)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-
-class GSTSrcVideo(object):
-
-    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
-        self.framerate = framerate
-        self.width = width
-        self.height = height
-        self.xpos = xpos
-        self.ypos = ypos
-        if not pipe:
-            pipe = 'videotestsrc pattern="snow"'
-        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
-                        % (self.framerate, str(self.width), str(self.height))
-
-
-class GSTWebmHttpStreamer(object):
-
-    def __init__(self, protocol='tcp', port=9000):
-        self.protocol = protocol
-        self.port = port
-
-
-class GSTMixer(object):
-
-    def __init__(self, osc_port=8338):
-        self.name = 'mixer'
-        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
-        self.srcs = []
-        self.i= 0
-
-        self.osc_port = osc_port
-        self.osc = OSCController(self.osc_port)
-
-    def osc_callback(self, path, value):
-        paths = path.split('/')
-        sink = paths[1]
-        param = paths[2]
-        for src in self.srcs:
-            if src['sink'] == sink:
-                break
-        src['control'].set(param, 5 * gst.SECOND, value[0])
-
-    def osc_alpha_callback(self, path, value):
-        paths = path.split('/')
-        layer = paths[1]
-        param = paths[2]
-        id = int(param[-1])-1
-        for src in self.srcs:
-            if src['id'] == id:
-                break
-        src['control'].set('alpha', 5 * gst.SECOND, value[0])
-
-    def osc_xy_callback(self, path, value):
-        for src in self.srcs:
-            if src['id'] == 2:
-                break
-        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
-        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
-
-    def add_src(self, src):
-        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
-        self.i += 1
-
-    def setup(self):
-        self.srcs.reverse()
-
-        for src in self.srcs:
-            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
-
-        print ' '.join(self.pipe)
-        self.process = gst.parse_launch(' '.join(self.pipe))
-        mixer = self.process.get_by_name("mixer")
-
-        for src in self.srcs:
-            src['pad'] = mixer.get_pad(src['sink'])
-            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
-
-            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
-
-            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
-
-            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
-
-            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
-
-        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
-
-    def run(self):
-        self.osc.start()
-        self.process.set_state(gst.STATE_PLAYING)
-        gobject.MainLoop().run()
-
-
-if __name__ == '__main__':
-    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
-    src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
-    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
-    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
-    mixer = GSTMixer()
-    mixer.add_src(src1)
-    mixer.add_src(src2)
-    mixer.add_src(src3)
-    mixer.add_src(src4)
-    mixer.setup()
-    mixer.run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_parallel.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_parallel.py
deleted file mode 100644 (file)
index 379985e..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-from threading import Thread
-
-
-class OSCController(Thread):
-
-    def __init__(self, port):
-        Thread.__init__(self)
-        import liblo
-        self.port = port
-        try:
-            self.server = liblo.Server(self.port)
-        except liblo.ServerError, err:
-            print str(err)
-
-    def add_method(self, path, type, method):
-        self.server.add_method(path, type, method)
-
-    def run(self):
-        while True:
-            self.server.recv(100)
-
-
-class GSTSrcVideo(object):
-
-    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
-        self.framerate = framerate
-        self.width = width
-        self.height = height
-        self.xpos = xpos
-        self.ypos = ypos
-        if not pipe:
-            pipe = 'videotestsrc pattern="snow"'
-        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
-                        % (self.framerate, str(self.width), str(self.height))
-
-class GSTMixer(object):
-
-    def __init__(self, osc_port=13000):
-        self.name = 'mixer'
-        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
-        self.srcs = []
-        self.i= 0
-
-        self.osc_port = osc_port
-        self.osc = OSCController(self.osc_port)
-
-    def osc_callback(self, path, value):
-        paths = path.split('/')
-        sink = paths[1]
-        param = paths[2]
-        for src in self.srcs:
-            if src['sink'] == sink:
-                break
-        src['control'].set(param, 5 * gst.SECOND, value[0])
-
-    def add_src(self, src):
-        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
-        self.i += 1
-
-    def setup(self):
-        self.srcs.reverse()
-
-        for src in self.srcs:
-            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
-
-        print ' '.join(self.pipe)
-        self.process = gst.parse_launch(' '.join(self.pipe))
-        mixer = self.process.get_by_name("mixer")
-
-        for src in self.srcs:
-            src['pad'] = mixer.get_pad(src['sink'])
-            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
-
-            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
-            self.osc.add_method('/'+src['sink']+'/xpos', 'i', self.osc_callback)
-
-            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
-            self.osc.add_method('/'+src['sink']+'/ypos', 'i', self.osc_callback)
-
-            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
-            self.osc.add_method('/'+src['sink']+'/alpha', 'f', self.osc_callback)
-
-
-    def run(self):
-        self.osc.start()
-        self.process.set_state(gst.STATE_PLAYING)
-        gobject.MainLoop().run()
-
-
-if __name__ == '__main__':
-    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc')
-    src2 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=100, ypos=50)
-    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
-    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
-    mixer = GSTMixer()
-    mixer.add_src(src1)
-    mixer.add_src(src2)
-    mixer.add_src(src3)
-    mixer.add_src(src4)
-    mixer.setup()
-    mixer.run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_parallel_no_effects.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_parallel_no_effects.py
deleted file mode 100644 (file)
index 771b229..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-
-p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_3
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_2
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_1
-      videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
-""")
-
-m1 = p.get_by_name("mix0")
-
-s1_1 = m1.get_pad("sink_1")
-c1_1 = gst.Controller(s1_1, "xpos", "ypos", "alpha")
-c1_1.set("xpos", 0, 0)
-c1_1.set("ypos", 0, 0)
-c1_1.set("alpha", 0, 1.0)
-
-s1_2 = m1.get_pad("sink_2")
-c1_2 = gst.Controller(s1_2, "xpos", "ypos", "alpha")
-c1_2.set("xpos", 0, 200)
-c1_2.set("ypos", 0, 200)
-c1_2.set("alpha", 0, 1.0)
-
-s1_3 = m1.get_pad("sink_3")
-c1_3 = gst.Controller(s1_3, "xpos", "ypos", "alpha")
-c1_3.set("xpos", 0, 400)
-c1_3.set("ypos", 0, 0)
-c1_3.set("alpha", 0, 1.0)
-
-p.set_state(gst.STATE_PLAYING)
-
-gobject.MainLoop().run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_pipes.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/control_mixer_pipes.py
deleted file mode 100644 (file)
index d0fcb9e..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-
-p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
-      videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
-      videomixer name=mix1 ! mix0.sink_1
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix1.sink_0
-      videomixer name=mix2 ! mix1.sink_1
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_0
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_1
-""")
-
-m1 = p.get_by_name ("mix1")
-s1_0 = m1.get_pad ("sink_0")
-s1_0.set_property ("xpos", 100)
-s1_1 = m1.get_pad ("sink_1")
-s1_1.set_property ("xpos", 250)
-
-m2 = p.get_by_name ("mix2")
-s2_0 = m2.get_pad ("sink_0")
-s2_0.set_property ("xpos", 200)
-s2_1 = m2.get_pad ("sink_1")
-s2_1.set_property ("xpos", 250)
-
-c1_0 = gst.Controller(s1_0, "ypos", "alpha")
-c1_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c1_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c1_0.set("ypos", 0, 0)
-c1_0.set("ypos", 5 * gst.SECOND, 200)
-c1_0.set("alpha", 0, 0)
-c1_0.set("alpha", 5 * gst.SECOND, 1.0)
-
-c1_1 = gst.Controller(s1_1, "ypos", "alpha")
-c1_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c1_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c1_1.set("ypos", 0, 0)
-c1_1.set("ypos", 5 * gst.SECOND, 200)
-c1_1.set("alpha", 0, 0)
-c1_1.set("alpha", 5 * gst.SECOND, 1.0)
-
-c2_0 = gst.Controller(s2_0, "ypos", "alpha")
-c2_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c2_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c2_0.set("ypos", 0, 0)
-c2_0.set("ypos", 5 * gst.SECOND, 200)
-c2_0.set("alpha", 0, 0)
-c2_0.set("alpha", 5 * gst.SECOND, 1.0)
-
-c2_1 = gst.Controller(s2_1, "ypos", "alpha")
-c2_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-c2_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-c2_1.set("ypos", 0, 0)
-c2_1.set("ypos", 5 * gst.SECOND, 200)
-c2_1.set("alpha", 0, 0)
-c2_1.set("alpha", 5 * gst.SECOND, 1.0)
-
-p.set_state (gst.STATE_PLAYING)
-
-gobject.MainLoop().run()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/crc.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/crc.py
deleted file mode 100644 (file)
index 578b526..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-# This file is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-# 
-# This file is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-# 
-# You should have received a copy of the GNU General Public License
-# along with This file.  If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-
-import zlib
-
-import gst
-
-def main(path):
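-    # decode the file to raw integer PCM through an appsink and accumulate a CRC-32 over every pulled buffer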
-    pipeline = gst.parse_launch('''
-        filesrc location="%s" !
-        decodebin ! audio/x-raw-int !
-        appsink name=sink sync=False''' % path)
-    sink = pipeline.get_by_name('sink')
-
-    pipeline.set_state(gst.STATE_PLAYING)
-    crc = 0
-
-    while True:
-        try:
-            buf = sink.emit('pull-buffer')
-        except SystemError, e:
-            # it's probably a bug that emitting pull-buffer triggers a SystemError here
-            print 'SystemError', e
-            break
-
-        # should be coming from a CD
-        assert len(buf) % 4 == 0, "buffer is not a multiple of 4 bytes"
-        crc = zlib.crc32(buf, crc)
-
-    crc = crc % 2 ** 32
-    print "CRC: %08X" % crc
-
-
-path = 'test.flac'
-
-try:
-    path = sys.argv[1]
-except IndexError:
-    pass
-
-main(path)
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/cross-fade.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/cross-fade.py
deleted file mode 100644 (file)
index fb6a92a..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/usr/bin/env python
-"""Extends basic demo with a gnl composition"""
-import gobject
-gobject.threads_init()
-from demo import Demo, DemoException
-import gtk
-import gst
-import sys
-import os
-
-def create_decodebin():
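-    # prefer decodebin2 when available, fall back to the older decodebin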
-    try:
-        return gst.element_factory_make("decodebin2")
-    except:
-        return gst.element_factory_make("decodebin")
-
-class SimpleCrossfadeDemo(Demo):
-    __name__ = "Demo of crosfade  without using gnonlin"
-    __usage__ = '''python %s sourceA sourceB
-    live crossfading between two sources''' % sys.argv[0]
-    __def_win_size__ = (320, 420)
-
-    def magic(self, pipeline, sink, args):
-
-        def onPad(obj, pad, target):
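-            # decodebin pads appear at runtime; link each new pad to a compatible pad on the converter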
-            sinkpad = target.get_compatible_pad(pad, pad.get_caps())
-            if sinkpad:
-                pad.link(sinkpad)
-            return True
-
-        assert len(sys.argv) == 3
-        assert os.path.exists(sys.argv[-1])
-        assert os.path.exists(sys.argv[-2])
-
-        # <excerpt 1>
-        src = gst.element_factory_make("filesrc")
-        src.set_property("location", sys.argv[-1])
-
-        srcAdecode = create_decodebin()
-        srcAconvert = gst.element_factory_make("ffmpegcolorspace")
-        srcAalpha = gst.element_factory_make("alpha")
-        srcAalpha.set_property("alpha", 1.0)
-
-        srcB = gst.element_factory_make("filesrc")
-        srcB.set_property("location", sys.argv[-2])
-        srcBdecode = create_decodebin()
-        srcBconvert = gst.element_factory_make("ffmpegcolorspace")
-        srcBalpha = gst.element_factory_make("alpha")
-        srcBalpha.set_property("alpha", 0.5)
-
-        mixer = gst.element_factory_make("videomixer")
-        mixer.set_property("background", "black")
-        # </excerpt>
-
-        # <excerpt 2>
-        pipeline.add(mixer)
-
-        pipeline.add(src, srcAdecode, srcAconvert, srcAalpha)
-        src.link(srcAdecode)
-        srcAdecode.connect("pad-added", onPad, srcAconvert)
-        srcAconvert.link(srcAalpha)
-        srcAalpha.link(mixer)
-
-        pipeline.add(srcB, srcBdecode, srcBconvert, srcBalpha)
-        srcB.link(srcBdecode)
-        srcBdecode.connect("pad-added", onPad, srcBconvert)
-        srcBconvert.link(srcBalpha)
-        srcBalpha.link(mixer)
-
-        mixer.link(sink)
-
-        # remember the alpha elements
-        self.srcBalpha = srcBalpha
-        # </excerpt>
-
-
-    # overriding from parent
-    def customWidgets(self):
-        """Create a control for each property in the videobalance
-        widget"""
-
-        # <excerpt 3>
-        # to be called when a property value needs to change
-        def onValueChanged(widget):
-            if self.srcBalpha:
-                self.srcBalpha.set_property("alpha", widget.get_value())
-        # </excerpt>
-
-        lower = 0
-        upper = 1
-        default = 0.5
-
-        # create a place to hold our controls
-        controls = gtk.VBox()
-        labels = gtk.VBox()
-
-        widget = gtk.HScale(); label = gtk.Label("Crossfade")
-
-        # set appropriate attributes
-        widget.set_update_policy(gtk.UPDATE_CONTINUOUS)
-        widget.set_draw_value(True)
-        widget.set_range(lower, upper)
-        widget.set_value(default)
-
-        # connect to our signal handler, specifying the property
-        # to adjust
-        widget.connect("value-changed", onValueChanged)
-
-        # pack widget into box
-        controls.pack_start(widget, True, True)
-        labels.pack_start(label, True, False)
-
-        layout = gtk.HBox()
-        layout.pack_start(labels, False, False)
-        layout.pack_end(controls, True, True)
-        return layout
-
-if __name__ == '__main__':
-    SimpleCrossfadeDemo().run()
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/cross-fade_2.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/cross-fade_2.py
deleted file mode 100644 (file)
index c1fd340..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/python
-import gobject; gobject.threads_init()
-import pygst; pygst.require("0.10")
-import gst
-
-p = gst.parse_launch ("""videomixer name=mix ! ffmpegcolorspace ! xvimagesink
-      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=10/1, width=200, height=150 ! mix.sink_0
-      videotestsrc ! video/x-raw-yuv, framerate=10/1, width=640, height=360 ! mix.sink_1
-""")
-
-m = p.get_by_name ("mix")
-s0 = m.get_pad ("sink_0")
-s0.set_property ("xpos", 100)
-
-control = gst.Controller(s0, "ypos", "alpha")
-control.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
-control.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
-control.set("ypos", 0, 0); control.set("ypos", 5 * gst.SECOND, 200)
-control.set("alpha", 0, 0); control.set("alpha", 5 * gst.SECOND, 1.0)
-
-p.set_state (gst.STATE_PLAYING)
-
-gobject.MainLoop().run()
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/demo.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/demo.py
deleted file mode 100644 (file)
index 51e95f3..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/env python
-
-"""Basic Framework for writing GStreamer Demos in Python"""
-#<excerpt 2>
-import gobject
-gobject.threads_init()
-import gst
-#</excerpt>
-import pygtk
-pygtk.require("2.0")
-import gtk
-gtk.gdk.threads_init()
-import sys
-import os
-
-
-class DemoException(Exception):
-    """Base exception class for errors which occur during demos"""
-
-    def __init__(self, reason):
-        self.reason = reason
-
-class Demo:
-    """Base class implementing boring, boiler-plate code.
-    Sets up a basic gstreamer environment which includes:
-
-    * a window containing a drawing area and basic media controls
-    * a basic gstreamer pipeline using an ximagesink
-    * connects the ximagesink to the window's drawing area
-
-    Derived classes need only override magic(), __name__,
-    and __usage__ to create new demos."""
-
-    __name__ = "Basic Demo"
-    __usage__ = "python demo.py -- runs a simple test demo"
-    __def_win_size__ = (320, 240)
-
-    # this comment allows us to include only a portion of the file
-    # in the tutorial for this demo
-    # <excerpt 1>     ...
-
-    def magic(self, pipeline, sink, args):
-        """This is where the magic happens"""
-        src = gst.element_factory_make("videotestsrc", "src")
-        pipeline.add(src)
-        src.link(sink)
-
-
-    def createPipeline(self, w):
-        """Given a window, creates a pipeline and connects it to the window"""
-
-        # code will make the ximagesink output in the specified window
-        def set_xid(window):
-            gtk.gdk.threads_enter()
-            sink.set_xwindow_id(window.window.xid)
-            sink.expose()
-            gtk.gdk.threads_leave()
-
-        # this code receives the messages from the pipeline. if we
-        # need to set X11 id, then we call set_xid
-        def bus_handler(unused_bus, message):
-            if message.type == gst.MESSAGE_ELEMENT:
-                if message.structure.get_name() == 'prepare-xwindow-id':
-                    set_xid(w)
-            return gst.BUS_PASS
-
-        # create our pipeline, and connect our bus_handler
-        self.pipeline = gst.Pipeline()
-        bus = self.pipeline.get_bus()
-        bus.set_sync_handler(bus_handler)
-
-        sink = gst.element_factory_make("ximagesink", "sink")
-        sink.set_property("force-aspect-ratio", True)
-        sink.set_property("handle-expose", True)
-        scale = gst.element_factory_make("videoscale", "scale")
-        cspace = gst.element_factory_make("ffmpegcolorspace", "cspace")
-
-        # our pipeline looks like this: ... ! cspace ! scale ! sink
-        self.pipeline.add(cspace, scale, sink)
-        scale.link(sink)
-        cspace.link(scale)
-        return (self.pipeline, cspace)
-
-    # ... end of excerpt </excerpt>
-
-    # subclasses can override this method to provide custom controls
-    def customWidgets(self):
-        return gtk.HBox()
-
-    def createWindow(self):
-        """Creates a top-level window, sets various boring attributes,
-        creates a place to put the video sink, adds some media controls, and finally
-        connects some basic signal handlers. Really, really boring.
-        """
-
-        # create window, set basic attributes
-        w = gtk.Window()
-        w.set_size_request(*self.__def_win_size__)
-        w.set_title("Gstreamer " + self.__name__)
-        w.connect("destroy", gtk.main_quit)
-
-        # declare buttons and their associated handlers
-        controls = (
-            ("play_button", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),
-            ("stop_button", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),
-            ("quit_button", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)
-        )
-
-        # as well as the container in which to put them
-        box = gtk.HButtonBox()
-
-        # for every widget, connect to its clicked signal and add it
-        # to the enclosing box
-        for name, widget, handler in controls:
-            widget.connect("clicked", handler)
-            box.pack_start(widget, True)
-            setattr(self, name, widget)
-
-        viewer = gtk.DrawingArea()
-        viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)
-
-        # we will need this later
-        self.xid = None
-
-        # now finally do the top-level layout for the window
-        layout = gtk.VBox(False)
-        layout.pack_start(viewer)
-
-        # subclasses can override customWidgets() to supply
-        # custom controls
-        layout.pack_start(self.customWidgets(), False, False)
-        layout.pack_end(box, False, False)
-        w.add(layout)
-        w.show_all()
-
-        # we want to return only the portion of the window which will
-        # be used to display the video, not the whole top-level
-        # window. a DrawingArea widget is, in fact, an X11 window.
-        return viewer
-
-    def onPlay(self, unused_button):
-        self.pipeline.set_state(gst.STATE_PLAYING)
-
-    def onStop(self, unused_button):
-        self.pipeline.set_state(gst.STATE_READY)
-
-    def run(self):
-        w = self.createWindow()
-        p, s = self.createPipeline(w)
-        try:
-            self.magic(p, s, sys.argv[1:])
-            gtk.main()
-        except DemoException, e:
-            print e.reason
-            print self.__usage__
-            sys.exit(-1)
-
-# if this file is being run directly, create the demo and run it
-if __name__ == '__main__':
-    Demo().run()
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/gtk_sink_pad.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/gtk_sink_pad.py
deleted file mode 100644 (file)
index 0b0c53f..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-
-import sys, os
-import pygtk, gtk, gobject
-import pygst
-pygst.require("0.10")
-import gst
-
-class GTK_Main:
-
-       def __init__(self):
-               window = gtk.Window(gtk.WINDOW_TOPLEVEL)
-               window.set_title("Vorbis-Player")
-               window.set_default_size(500, 200)
-               window.connect("destroy", gtk.main_quit, "WM destroy")
-               vbox = gtk.VBox()
-               window.add(vbox)
-               self.entry = gtk.Entry()
-               vbox.pack_start(self.entry, False)
-               self.button = gtk.Button("Start")
-               vbox.add(self.button)
-               self.button.connect("clicked", self.start_stop)
-               window.show_all()
-
-               self.player = gst.Pipeline("player")
-               source = gst.element_factory_make("filesrc", "file-source")
-               demuxer = gst.element_factory_make("oggdemux", "demuxer")
-               demuxer.connect("pad-added", self.demuxer_callback)
-               self.audio_decoder = gst.element_factory_make("vorbisdec", "vorbis-decoder")
-               audioconv = gst.element_factory_make("audioconvert", "converter")
-               audiosink = gst.element_factory_make("autoaudiosink", "audio-output")
-
-               self.player.add(source, demuxer, self.audio_decoder, audioconv, audiosink)
-               gst.element_link_many(source, demuxer)
-               gst.element_link_many(self.audio_decoder, audioconv, audiosink)
-
-               bus = self.player.get_bus()
-               bus.add_signal_watch()
-               bus.connect("message", self.on_message)
-
-       def start_stop(self, w):
-               if self.button.get_label() == "Start":
-                       filepath = self.entry.get_text()
-                       if os.path.isfile(filepath):
-                               self.button.set_label("Stop")
-                               self.player.get_by_name("file-source").set_property("location", filepath)
-                               self.player.set_state(gst.STATE_PLAYING)
-               else:
-                       self.player.set_state(gst.STATE_NULL)
-                       self.button.set_label("Start")
-
-       def on_message(self, bus, message):
-               t = message.type
-               if t == gst.MESSAGE_EOS:
-                       self.player.set_state(gst.STATE_NULL)
-                       self.button.set_label("Start")
-               elif t == gst.MESSAGE_ERROR:
-                       err, debug = message.parse_error()
-                       print "Error: %s" % err, debug
-                       self.player.set_state(gst.STATE_NULL)
-                       self.button.set_label("Start")
-
-       def demuxer_callback(self, demuxer, pad):
-               adec_pad = self.audio_decoder.get_pad("sink")
-               pad.link(adec_pad)
-
-GTK_Main()
-gtk.gdk.threads_init()
-gtk.main()
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_play.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_play.py
deleted file mode 100644 (file)
index 76e4922..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import liblo, sys
-
-# send all messages to port 12345 on the local machine
-try:
-    target = liblo.Address(12345)
-except liblo.AddressError, err:
-    print str(err)
-    sys.exit()
-
-# send message "/foo/message1" with int, float and string arguments
-liblo.send(target, "/play", 1)
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_stop.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_stop.py
deleted file mode 100644 (file)
index cce3314..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import liblo, sys
-
-# send all messages to port 12345 on the local machine
-try:
-    target = liblo.Address(12345)
-except liblo.AddressError, err:
-    print str(err)
-    sys.exit()
-
-# send message "/foo/message1" with int, float and string arguments
-liblo.send(target, "/play", 0)
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_test.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/osc_test.py
deleted file mode 100644 (file)
index 0f4337e..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import liblo, sys
-
-# send all messages to port 13000 on the local machine
-try:
-    target = liblo.Address(13000)
-except liblo.AddressError, err:
-    print str(err)
-    sys.exit()
-
-# send message "/foo/message1" with int, float and string arguments
-liblo.send(target, "/sink_2/xpos", 200)
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/playbin.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/playbin.py
deleted file mode 100644 (file)
index 32051e1..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-import pygst 
-pygst.require("0.10") 
-import gst 
-
-#pipeline = gst.Pipeline() 
-playbin = gst.element_factory_make("playbin2", 'player') 
-#sink = gst.element_factory_make("autoaudiosink", None) 
-
-playbin.set_property("uri", "/home/momo/music_local/test/sweep.wav")
-#playbin.set_property("uri", "/home/momo/video_local/webm/ocean-clip.webm")
-#playbin.set_property("audio-sink", sink)
-
-#pipeline.add(playbin) 
-
-import time 
-playbin.set_state(gst.STATE_PLAYING) 
-time.sleep(200) 
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/radiodelay.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/radiodelay.py
deleted file mode 100644 (file)
index 39fc21c..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/python
-
-import pygst
-pygst.require("0.10")
-import gst
-import pygtk
-import gtk
-import sys
-
-class Main:
-    def __init__(self):
-        # this just reads the command line args
-        try:
-            DELAY = float(sys.argv[1])
-            DELAY = long(DELAY * 1000000000)
-            print DELAY
-        except IndexError:
-            DELAY = 0
-
-        self.delay_pipeline = gst.Pipeline("mypipeline")
-        # ALSA capture source
-        self.audiosrc = gst.element_factory_make("alsasrc", "audio")
-        self.audiosrc.set_property("device", "default")
-        self.delay_pipeline.add(self.audiosrc)
-        # queue acting as the delay buffer
-        self.audioqueue = gst.element_factory_make("queue", "queue1")
-        self.audioqueue.set_property("max-size-time", 0)
-        self.audioqueue.set_property("max-size-buffers", 0)
-        self.audioqueue.set_property("max-size-bytes", 0)
-        self.audioqueue.set_property("min-threshold-time", DELAY)
-        self.audioqueue.set_property("leaky", "no")
-        self.delay_pipeline.add(self.audioqueue)
-        # audio output
-        self.sink = gst.element_factory_make("autoaudiosink", "sink")
-        self.delay_pipeline.add(self.sink)
-        # link the elements
-        self.audiosrc.link(self.audioqueue)
-        self.audioqueue.link(self.sink)
-        # begin playing
-        self.delay_pipeline.set_state(gst.STATE_PLAYING)
-
-start=Main()
-gtk.main()
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/rtpx264.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/rtpx264.sh
deleted file mode 100755 (executable)
index 86ae994..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-gst-launch -v  gstrtpbin name=rtpbin \
- v4l2src \
- ! queue ! videoscale method=1 ! video/x-raw-yuv,width=640,height=360 \
- ! queue ! x264enc byte-stream=true bitrate=1000 bframes=4 ref=4 me=hex subme=4 weightb=true threads=4 ! rtph264pay \
- ! rtpbin.send_rtp_sink_0 \
- rtpbin.send_rtp_src_0 ! udpsink port=5000 host=127.0.0.1 \
- rtpbin.send_rtcp_src_0 ! udpsink port=5001 host=127.0.0.1 sync=false async=false  \
- udpsrc port=5002 ! rtpbin.recv_rtcp_sink_0 > /dev/null &
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/rtpx264_pl.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/rtpx264_pl.sh
deleted file mode 100755 (executable)
index 308b176..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-gst-launch -v gstrtpbin name=rtpbin latency=200 \
- udpsrc caps="application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" port=5000 \
- ! rtpbin.recv_rtp_sink_0 \
- rtpbin. ! rtph264depay ! tee name=t ! ffdec_h264 ! xvimagesink \
- udpsrc port=5001 ! rtpbin.recv_rtcp_sink_0 \
- rtpbin.send_rtcp_src_0 ! udpsink port=5002 host=127.0.0.1 sync=false async=false \
- t. ! filesink location=/tmp/video.mp4 
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/simple-effect-gtk.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/simple-effect-gtk.py
deleted file mode 100644 (file)
index 19824f7..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-"""Extends basic demo with a gnl composition"""
-
-from demo import Demo, DemoException
-import gtk
-import gst
-import sys
-import os
-
-def create_decodebin():
-    try:
-        return gst.element_factory_make("decodebin2")
-    except:
-        return gst.element_factory_make("decodebin")
-
-class SimpleEffectDemo(Demo):
-    __name__ = "Basic GStreamer Effect Demo"
-    __usage__ = '''python %s file
-    display file with a color_balance effect''' % sys.argv[0]
-    __def_win_size__ = (320, 500)
-    # <excerpt 1>
-    def magic(self, pipeline, sink, args):
-
-        def onPad(obj, pad, target):
-            sinkpad = target.get_compatible_pad(pad, pad.get_caps())
-            pad.link(sinkpad)
-            return True
-
-        assert os.path.exists(sys.argv[-1])
-
-        # create the following pipeline
-        # filesrc location = sys.argv[1] ! decodebin ! videobalance ! ...
-        src = gst.element_factory_make("filesrc")
-        src.set_property("location", sys.argv[-1])
-        decode = create_decodebin()
-
-        self.balance = gst.element_factory_make("videobalance")
-
-        pipeline.add(src, decode, self.balance)
-        src.link(decode)
-        decode.connect("pad-added", onPad, self.balance)
-        self.balance.link(sink)
-
-        return
-    # </excerpt>
-
-    # <excerpt 2>
-    # overriding from parent
-    def customWidgets(self):
-        """Create a control for each property in the videobalance
-        widget"""
-
-        # to be called when a property value needs to change
-        def onValueChanged(widget, prop):
-            # set the corresponding property of the videobalance element
-            self.balance.set_property(prop, widget.get_value())
-
-        # videobalance has several properties, with the following range
-        # and defaults
-        properties = [("contrast", 0, 2, 1),
-                      ("brightness", -1, 1, 0),
-                      ("hue", -1, 1, 0),
-                      ("saturation", 0, 2, 1)]
-
-        # create a place to hold our controls
-        controls = gtk.VBox()
-        labels = gtk.VBox()
-        # for every property, create a control and set its attributes
-        for prop, lower, upper, default in properties:
-            widget = gtk.HScale(); label = gtk.Label(prop)
-
-            # set appropriate attributes
-            widget.set_update_policy(gtk.UPDATE_CONTINUOUS)
-            widget.set_value(default)
-            widget.set_draw_value(True)
-            widget.set_range(lower, upper)
-
-            # connect to our signal handler, specifying the property
-            # to adjust
-            widget.connect("value-changed", onValueChanged, prop)
-
-            # pack widget into box
-            controls.pack_start(widget, True, True)
-            labels.pack_start(label, True, False)
-
-        layout = gtk.HBox()
-        layout.pack_start(labels, False, False)
-        layout.pack_end(controls, True, True)
-        return layout
-
-    # </excerpt>
-
-if __name__ == '__main__':
-    SimpleEffectDemo().run()
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/video_player_qt.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/video_player_qt.py
deleted file mode 100644 (file)
index 91e946a..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-import sys, os
-from PyQt4 import QtCore, QtGui, uic
-from PyQt4.phonon import Phonon
-
-class VideoPlayer(QtGui.QWidget):
-    def __init__(self, url, parent = None):
-
-        self.url = url
-
-        QtGui.QWidget.__init__(self, parent)
-        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
-            QtGui.QSizePolicy.Preferred)
-
-
-        self.player = Phonon.VideoPlayer(Phonon.VideoCategory,self)
-        self.player.load(Phonon.MediaSource(self.url))
-        self.player.mediaObject().setTickInterval(100)
-        self.player.mediaObject().tick.connect(self.tock)
-
-        self.play_pause = QtGui.QPushButton(self)
-        self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
-        self.play_pause.clicked.connect(self.playClicked)
-        self.player.mediaObject().stateChanged.connect(self.stateChanged)
-
-        self.slider = Phonon.SeekSlider(self.player.mediaObject() , self)
-
-        self.status = QtGui.QLabel(self)
-        self.status.setAlignment(QtCore.Qt.AlignRight |
-            QtCore.Qt.AlignVCenter)
-
-        self.download = QtGui.QPushButton("Download", self)
-        self.download.clicked.connect(self.fetch)
-        topLayout = QtGui.QVBoxLayout(self)
-        topLayout.addWidget(self.player)
-        layout = QtGui.QHBoxLayout(self)
-        layout.addWidget(self.play_pause)
-        layout.addWidget(self.slider)
-        layout.addWidget(self.status)
-        layout.addWidget(self.download)
-        topLayout.addLayout(layout)
-        self.setLayout(topLayout)
-
-    def playClicked(self):
-        if self.player.mediaObject().state() == Phonon.PlayingState:
-            self.player.pause()
-        else:
-            self.player.play()
-
-    def stateChanged(self, new, old):
-        if new == Phonon.PlayingState:
-            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_pause.svg'))
-        else:
-            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
-
-    def tock(self, time):
-        time = time/1000
-        h = time/3600
-        m = (time-3600*h) / 60
-        s = (time-3600*h-m*60)
-        self.status.setText('%02d:%02d:%02d'%(h,m,s))
-
-    def fetch(self):
-        print 'Should download %s'%self.url
-
-def main():
-    app = QtGui.QApplication(sys.argv)
-    window=VideoPlayer(sys.argv[1])
-    window.show()
-    # It's exec_ because exec is a reserved word in Python
-    sys.exit(app.exec_())
-
-if __name__ == "__main__":
-    main()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/vumeter.py b/tcserver/conf/usr/local/share/telecaster/scripts/tests/py/vumeter.py
deleted file mode 100644 (file)
index c2838ec..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-from PyQt4 import QtCore, QtGui
-import pygst
-import sys, os, time, math
-pygst.require("0.10")
-import gst
-import gobject
-
-#This class runs the code it contains in another thread using QThread
-class Player(QtCore.QThread):
-     def __init__(self):
-          QtCore.QThread.__init__(self)
-
-     def run(self):
-          #create the pipeline
-          player = gst.Pipeline("player")
-          #filesrc element
-          source = gst.element_factory_make("filesrc", "file-source")
-          #volume element to adjust volume of audio
-          volume = gst.element_factory_make("volume", "volume")
-          #level element to get the rms/peak property
-          level = gst.element_factory_make("level", "volume-level")
-          #decoder to play mp3 files
-          decoder = gst.element_factory_make("mad", "mp3-decoder")
-          #convert the audio to play to speakers
-          conv = gst.element_factory_make("audioconvert", "converter")
-          #autosink if not alsa
-          sink = gst.element_factory_make("autoaudiosink", "audio-output")
-
-          #add the elements to the pipeline
-          player.add(source, volume, level, decoder, conv, sink)
-
-          #link the elements in order
-          gst.element_link_many(source, decoder, conv, volume, level, sink)
-          #set properties of elements
-          player.get_by_name("volume").set_property('volume', 1)
-          player.get_by_name("volume-level").set_property('peak-ttl' , 0)
-          player.get_by_name("volume-level").set_property('peak-falloff', 20)
-          #add bus to listen signal from
-          bus = gst.Pipeline.get_bus(player)
-          gst.Bus.add_signal_watch(bus)
-
-          #the source of the player
-          filepath = "/home/momo/music_local/test/aboul.wav.mp3"
-          #set the property of the element filesrc
-          player.get_by_name("file-source").set_property('location', filepath)
-          #play the file
-          player.set_state(gst.STATE_PLAYING)
-          #get the current thread in Qt
-          play_thread_id = self.currentThread
-
-          #set the minimum decibels
-          MIN_DB = -45
-          #set the maximum decibels
-          MAX_DB = 0
-          #if current thread is running
-          while play_thread_id == self.currentThread:
-               #listen to messages that emit during playing
-               messagePoll = bus.poll(gst.MESSAGE_ANY,-1)
-               #if the message is level
-               if messagePoll.src == level:
-                    #get the structure of the message
-                    struc = messagePoll.structure
-                    #if the structure message is rms
-                    if struc.has_key('rms'):
-                         rms = struc["rms"]
-                         #get the values of rms in a list
-                         rms0 = abs(float(rms[0]))
-                         #compute for rms to decibels
-                         rmsdb = 10 * math.log(rms0 / 32768 )
-                         #compute for progress bar
-                         vlrms = (rmsdb-MIN_DB) * 100 / (MAX_DB-MIN_DB)
-                         #emit the signal to the qt progress bar
-                         self.emit(QtCore.SIGNAL("setLabel"), abs(vlrms))
-               #set timer
-               time.sleep(0.05)
-
-#this code produced using pyuic from qt designer
-class Ui_Dialog(object):
-     def setupUi(self, Dialog):
-          Dialog.setObjectName("Dialog")
-          Dialog.resize(QtCore.QSize(QtCore.QRect(0,0,94,300).size()).expandedTo(Dialog.minimumSizeHint()))
-
-          self.progressBar = QtGui.QProgressBar(Dialog)
-          self.progressBar.setGeometry(QtCore.QRect(10,10,31,281))
-          self.progressBar.setProperty("value",QtCore.QVariant(24))
-          self.progressBar.setOrientation(QtCore.Qt.Vertical)
-          self.progressBar.setObjectName("progressBar")
-          self.progressBar.setValue(0)
-          self.progressBar.setMinimum(0)
-          self.progressBar.setMaximum(100)
-
-          self.retranslateUi(Dialog)
-          QtCore.QMetaObject.connectSlotsByName(Dialog)
-     #sets the value of the progress bar from the emitted signal
-     def setLabel(self,value):
-          self.progressBar.setValue(value)
-
-     def retranslateUi(self, Dialog):
-          Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
-
-if __name__ == "__main__":
-     app = QtGui.QApplication(sys.argv)
-     window = QtGui.QDialog()
-     ui = Ui_Dialog()
-     ui.setupUi(window)
-     window.show()
-     #creates instance of the Player class
-     player=Player()
-     #connect to signal emitted in Player class
-     QtCore.QObject.connect(player, QtCore.SIGNAL("setLabel"), ui.setLabel, QtCore.Qt.QueuedConnection)
-     #run the Player class thread
-     player.start()
-     app.exec_()
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/rtpx264.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/rtpx264.sh
deleted file mode 100755 (executable)
index e3ca8cf..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-gst-launch -v  gstrtpbin name=rtpbin \
- v4l2src ! video/x-raw-yuv,width=640,height=480 \
- ! queue ! x264enc byte-stream=true bitrate=500 bframes=4 ref=4 me=hex subme=4 weightb=true threads=4 ! rtph264pay \
- ! rtpbin.send_rtp_sink_0 \
- rtpbin.send_rtp_src_0 ! udpsink port=5000 host=127.0.0.1 \
- rtpbin.send_rtcp_src_0 ! udpsink port=5001 host=127.0.0.1 sync=false async=false  \
- udpsrc port=5002 ! rtpbin.recv_rtcp_sink_0 
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/rtpx264_pl.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/rtpx264_pl.sh
deleted file mode 100755 (executable)
index c4445cd..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-gst-launch -v gstrtpbin name=rtpbin latency=200 \
- udpsrc caps="application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" port=5000 \
- ! rtpbin.recv_rtp_sink_0 \
- rtpbin. ! rtph264depay ! queue ! tee name=t ! ffdec_h264 ! xvimagesink \
- t. ! queue ! filesink location=/tmp/video.mp4 \
- udpsrc port=5001 ! rtpbin.recv_rtcp_sink_0 \
- rtpbin.send_rtcp_src_0 ! udpsink port=5002 host=127.0.0.1 sync=false async=false \
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_flu_simple_webm.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_flu_simple_webm.sh
deleted file mode 100755 (executable)
index e86ca98..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=640
-HEIGHT=360
-#WIDTH=1024
-#HEIGHT=576
-
-
-pipe="v4l2src device=/dev/video0  \
-       ! vp8-encoder ! muxout. \
-       jackaudiosrc ! vorbis-encoder ! muxout.  \
-       webmmux streamable=true name=muxout"
-
-flumotion-launch pipeline-producer pipeline="$pipe" ! http-streamer port=8800
-
-sleep 2
-
-jack_disconnect system:capture_1 flumotion-launch:in_jackaudiosrc0_1
-jack_connect   jack_rack:out_1  flumotion-launch:in_jackaudiosrc0_1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_alsa_webm_stream.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_alsa_webm_stream.sh
deleted file mode 100755 (executable)
index 7754217..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=320
-HEIGHT=240
-#WIDTH=1024
-#HEIGHT=576
-
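-# webcam video to VP8 and ALSA audio to Vorbis, muxed into streamable WebM and served over TCP on port 9000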
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=2 quality=9.0 ! queue ! muxout. \
-       alsasrc device=hw:0 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none \
-       > /dev/null 
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_dv_webm_stream.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_dv_webm_stream.sh
deleted file mode 100755 (executable)
index 054abc6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-#WIDTH=640
-#HEIGHT=360
-#WIDTH=1024
-#HEIGHT=576
-WIDTH=480
-HEIGHT=320
-
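-# DV from FireWire: demux, decode, deinterlace and scale, then VP8 video and Vorbis audio (from JACK) muxed to streamable WebM over TCP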
-gst-launch dv1394src ! dvdemux ! queue ! dvdec ! queue ! deinterlace \
-       ! queue ! videoscale ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT \
-       ! queue ! ffmpegcolorspace \
-        ! queue ! vp8enc speed=2 threads=2 quality=10.0 max-latency=25 max-keyframe-distance=96 \
-        ! queue ! muxout. \
-       jackaudiosrc connect=1 ! audio/x-raw-float, channels=2 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=0.6 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none blocksize=65536 sync-method=1 
-
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_only_simple_webm.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_only_simple_webm.sh
deleted file mode 100755 (executable)
index 7c92b88..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=640
-HEIGHT=480
-#WIDTH=1024
-#HEIGHT=576
-
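-# video-only stream: webcam to VP8 in streamable WebM, served over TCP on port 9000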
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=4 quality=5.0 ! queue ! muxout. \
-       webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none 
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_file.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_file.sh
deleted file mode 100755 (executable)
index 181dd2f..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=432
-HEIGHT=240
-
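-# record webcam video (Theora) and JACK audio (Vorbis) to an Ogg file in the archives directory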
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! theoraenc bitrate=400 speed-level=0 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc ! queue ! muxout.  \
-       oggmux name=muxout ! filesink location=/home/telecaster/archives/test.ogg \
-       > /dev/null &
-
-sleep 2
-
-jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
-jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_file_webm.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_file_webm.sh
deleted file mode 100755 (executable)
index d08dc06..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=640
-HEIGHT=360
-#WIDTH=1024
-#HEIGHT=576
-
-gst-launch v4l2src device=/dev/video1 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! videoflip method=rotate-180 \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=2 quality=5 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout ! filesink location=/home/telecaster/trash/test.webm \
-       > /dev/null &
-
-sleep 2
-
-jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
-jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_start.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_start.sh
deleted file mode 100755 (executable)
index 061e290..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=432
-HEIGHT=240
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! theoraenc quality=10 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
-       oggmux name=muxout ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
-       > /dev/null &
-
-sleep 2
-
-jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
-jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_ice.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_ice.sh
deleted file mode 100755 (executable)
index 58c1573..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=640
-HEIGHT=360
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-rgb, width=$WIDTH, height=$HEIGHT  \
-    ! queue ! ffmpegcolorspace \
-    ! queue ! vp8enc speed=2 threads=2 quality=9.0 \
-    ! webmmux streamable=true \
-    ! shout2send mount=/telecaster_live_video.webm port=8000 password=source2parisson ip=127.0.0.1
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream.sh
deleted file mode 100755 (executable)
index d41d76f..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=640
-HEIGHT=480
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT, framerate={30/1}  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=4 quality=7.0 max-latency=2 max-keyframe-distance=3 auto-alt-ref-frames=true  ! queue ! muxout. \
-       jackaudiosrc connect=2 client-name=webmenc ! audio/x-raw-float, channels=2 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none blocksize=65536 sync-method=1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd.sh
deleted file mode 100755 (executable)
index 7afd26c..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=1024
-HEIGHT=576
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! videoflip method=rotate-180 \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=2 quality=5 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! tee name=t ! queue ! tcpserversink host=127.0.0.1 port=9000 \
-       t. ! queue ! filesink location=/home/telecaster/trash/test.webm \
-       > /dev/null &
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_alsa_test.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_alsa_test.sh
deleted file mode 100755 (executable)
index 3894ae2..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=1280
-HEIGHT=720
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=1 quality=9.0 ! queue ! muxout. \
-       alsasrc \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
-
-       
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_jack_test.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_jack_test.sh
deleted file mode 100755 (executable)
index 0cfb252..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=1280
-HEIGHT=720
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=4 quality=9.0 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
-
-       
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_test.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_hd_test.sh
deleted file mode 100755 (executable)
index 8522473..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=1280
-HEIGHT=720
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=4 quality=9.0 \
-       ! queue ! webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_m.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_m.sh
deleted file mode 100755 (executable)
index b4d8403..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=640
-HEIGHT=360
-#WIDTH=1024
-#HEIGHT=576
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! videoflip method=rotate-180 \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=2 quality=5 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! tee name=t ! queue ! multifdsink name=sink sync=false recover-policy=3 \
-       t. ! queue ! filesink location=/home/telecaster/trash/test.webm \
-       > /dev/null &
-
-sleep 2
-
-jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
-jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_sd_test.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_stream_sd_test.sh
deleted file mode 100755 (executable)
index 0fb2ce0..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=800
-HEIGHT=600
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=4 quality=9.0 \
-       ! queue ! webmmux streamable=true name=muxout \
-       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_tee.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tc_video_simple_webm_tee.sh
deleted file mode 100755 (executable)
index d006917..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-WIDTH=640
-HEIGHT=360
-#WIDTH=1024
-#HEIGHT=576
-
-gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
-       ! queue ! videoflip method=rotate-180 \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=2 quality=9.0 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! tee name=t ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none \
-       t. ! queue ! filesink location=/home/telecaster/trash/test.webm \
-       > /dev/null &
-
-sleep 4
-
-jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
-jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
-
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/tcp2x.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/tcp2x.sh
deleted file mode 100755 (executable)
index 8e61926..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-
-gst-launch tcpclientsrc host=192.168.0.18 port=9000 \
-    ! matroskademux \
-    ! vp8dec ! ffmpegcolorspace \
-    ! ximagesink
-
-# tcpclientsrc host=192.168.0.18 port=9000 protocol=none \
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_2.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_2.sh
deleted file mode 100755 (executable)
index ae35e7c..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-gst-launch v4l2src ! video/x-raw-yuv,width=640,height=480 \
- ! queue ! x264enc byte-stream=true bitrate=500 bframes=4 ref=4 me=hex subme=4 weightb=true threads=4 \
- ! tcpserversink host=127.0.0.1 port=9000 protocol=none
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_pl2.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_pl2.sh
deleted file mode 100755 (executable)
index 8595bed..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-
-gst-launch tcpclientsrc host=127.0.0.1 port=9000 \
-  ! ffdec_h264 ! xvimagesink 
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_relay.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_relay.sh
deleted file mode 100755 (executable)
index 8d1a65b..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-gst-launch tcpclientsrc host=192.168.0.18 port=9000 \
-    ! matroskademux \
-    ! queue ! vp8dec \
-    ! queue ! ffmpegcolorspace \
-    ! queue ! x264enc bitrate=200 bframes=4 ref=4 me=hex subme=4 weightb=true threads=0 ! muxout. \
-       mp4mux name=muxout \
-       ! queue ! filesink location=/tmp/video.mp4
-
-# tcpclientsrc host=192.168.0.18 port=9000 protocol=none \
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_relay_x.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/x264_relay_x.sh
deleted file mode 100755 (executable)
index 3a30d1b..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-gst-launch tcpclientsrc host=192.168.0.18 port=9000 \
-    ! matroskademux \
-    ! vp8dec ! ffmpegcolorspace \
-    ! queue ! x264enc \
-    ! queue ! vdpauh264dec ! ffmpegcolorspace ! ximagesink
-
-# tcpclientsrc host=192.168.0.18 port=9000 protocol=none \
\ No newline at end of file
diff --git a/tcserver/conf/usr/local/share/telecaster/scripts/tests/x_jack_webm.sh b/tcserver/conf/usr/local/share/telecaster/scripts/tests/x_jack_webm.sh
deleted file mode 100755 (executable)
index 8abeab6..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# Start TeleCaster video channel
-
-gst-launch ximagesrc ! video/x-raw-rgb,framerate=30/1 \
-       ! queue ! ffmpegcolorspace \
-       ! queue ! vp8enc speed=2 threads=2 quality=9.0 ! queue ! muxout. \
-       jackaudiosrc connect=1 \
-       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
-       webmmux streamable=true name=muxout \
-       ! queue ! filesink location=/home/momo/tmp/desktop.webm
-       
-
diff --git a/tests/audio_player_osc.py b/tests/audio_player_osc.py
new file mode 100644 (file)
index 0000000..609882c
--- /dev/null
@@ -0,0 +1,57 @@
+
+import gobject
+gobject.threads_init()
+import pygst
+pygst.require("0.10")
+import gst
+from threading import Thread
+import sys
+import liblo
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+            
+class AudioPlayer(Thread):
+    
+    def __init__(self, uri):
+        Thread.__init__(self)
+        self.uri = uri
+        self.controller = OSCController(12345)
+        self.controller.server.add_method('/play', 'i', self.play_stop_cb)
+        self.controller.start()
+        
+        self.mainloop = gobject.MainLoop()
+        self.player = gst.element_factory_make("playbin", "player")
+        self.player.set_property('uri', self.uri)
+        
+    def play_stop_cb(self, path, value):
+        value = value[0]
+        if value:
+            print 'play'
+            self.player.set_state(gst.STATE_NULL)
+            self.player.set_state(gst.STATE_PLAYING)
+        else:
+            print 'stop'
+            self.player.set_state(gst.STATE_NULL)
+            
+    def run(self):
+        self.mainloop.run()
+    
+if __name__ == '__main__':
+    path = sys.argv[-1]
+    player = AudioPlayer(path)
+    player.start()
+    
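Note: the OSC interface registered above listens on port 12345 for a '/play' message carrying a single integer; a non-zero value restarts the playbin, zero stops it. A minimal pyliblo client to exercise it (a sketch, assuming pyliblo is installed and audio_player_osc.py is already running) could look like:

    import liblo
    # non-zero integer on /play: reset the playbin to NULL, then set it PLAYING
    liblo.send(12345, '/play', 1)
    # zero on /play: put the playbin back to NULL (stop)
    liblo.send(12345, '/play', 0)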
diff --git a/tests/audio_player_qt.py b/tests/audio_player_qt.py
new file mode 100644 (file)
index 0000000..ed7da6e
--- /dev/null
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+import sys, os
+from PyQt4 import QtCore, QtGui, uic
+from PyQt4.phonon import Phonon
+
+class AudioPlayer(QtGui.QWidget):
+    def __init__(self, url, parent = None):
+
+        self.url = url
+
+        QtGui.QWidget.__init__(self, parent)
+        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
+            QtGui.QSizePolicy.Preferred)
+
+
+        self.player = Phonon.createPlayer(Phonon.MusicCategory,
+            Phonon.MediaSource(url))
+        self.player.setTickInterval(100)
+        self.player.tick.connect(self.tock)
+
+        self.play_pause = QtGui.QPushButton(self)
+        self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+        self.play_pause.clicked.connect(self.playClicked)
+        self.player.stateChanged.connect(self.stateChanged)
+
+        self.slider = Phonon.SeekSlider(self.player , self)
+
+        self.status = QtGui.QLabel(self)
+        self.status.setAlignment(QtCore.Qt.AlignRight |
+            QtCore.Qt.AlignVCenter)
+
+        self.download = QtGui.QPushButton("Download", self)
+        self.download.clicked.connect(self.fetch)
+
+        layout = QtGui.QHBoxLayout(self)
+        layout.addWidget(self.play_pause)
+        layout.addWidget(self.slider)
+        layout.addWidget(self.status)
+        layout.addWidget(self.download)
+
+    def playClicked(self):
+        if self.player.state() == Phonon.PlayingState:
+            self.player.pause()
+        else:
+            self.player.play()
+
+    def stateChanged(self, new, old):
+        if new == Phonon.PlayingState:
+            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_pause.svg'))
+        else:
+            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+
+    def tock(self, time):
+        time = time/1000
+        h = time/3600
+        m = (time-3600*h) / 60
+        s = (time-3600*h-m*60)
+        self.status.setText('%02d:%02d:%02d'%(h,m,s))
+
+    def fetch(self):
+        print 'Should download %s'%self.url
+
+def main():
+    app = QtGui.QApplication(sys.argv)
+    window=AudioPlayer(sys.argv[1])
+    window.show()
+    # It's exec_ because exec is a reserved word in Python
+    sys.exit(app.exec_())
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/audio_video.py b/tests/audio_video.py
new file mode 100644 (file)
index 0000000..bf17145
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+"""A short Audio-Video example"""
+import gobject
+gobject.threads_init()
+import gst
+import pygtk
+pygtk.require("2.0")
+import gtk
+gtk.gdk.threads_init()
+import sys
+import os
+from demo import Demo
+
+def create_decodebin():
+    try:
+        return gst.element_factory_make("decodebin2")
+    except:
+        return gst.element_factory_make("decodebin")
+
+class DemoException(Exception):
+    """Base exception class for errors which occur during demos"""
+
+    def __init__(self, reason):
+        self.reason = reason
+
+class AVDemo(Demo):
+    """Extends base demo with both audio and video sinks
+    * a window containing a drawing area and basic media controls
+    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
+    * connects the ximagesink to the window's drawing area
+
+    Derived classes need only override magic(), __name__,
+    and __usage__ to create new demos."""
+
+    __name__ = "AV Demo"
+    __usage__ = "python audio_video.py <filename>"
+    __def_win_size__ = (320, 240)
+
+    # this comment allows us to include only a portion of the file
+    # in the tutorial for this demo
+
+    def magic(self, pipeline, (videosink, audiosink), args):
+        """This is where the magic happens"""
+
+        def onPadAdded(source, pad):
+            # first we see if we can link to the videosink
+            tpad = videoqueue.get_compatible_pad(pad)
+            if tpad:
+                pad.link(tpad)
+                return
+            # if not, we try the audio sink
+            tpad = audioqueue.get_compatible_pad(pad)
+            if tpad:
+                pad.link(tpad)
+                return
+
+        src = gst.element_factory_make("filesrc", "src")
+        src.props.location = args[0]
+        dcd = create_decodebin()
+        audioqueue = gst.element_factory_make("queue")
+        videoqueue = gst.element_factory_make("queue")
+        pipeline.add(src, dcd, audioqueue, videoqueue)
+
+        src.link(dcd)
+        videoqueue.link(videosink)
+        audioqueue.link(audiosink)
+        dcd.connect("pad-added", onPadAdded)
+
+    def createPipeline(self, w):
+        """Given a window, creates a pipeline and connects it to the window"""
+
+        # code will make the ximagesink output in the specified window
+        def set_xid(window):
+            gtk.gdk.threads_enter()
+            videosink.set_xwindow_id(window.window.xid)
+            videosink.expose()
+            gtk.gdk.threads_leave()
+
+        # this code receives the messages from the pipeline. if we
+        # need to set X11 id, then we call set_xid
+        def bus_handler(unused_bus, message):
+            if message.type == gst.MESSAGE_ELEMENT:
+                if message.structure.get_name() == 'prepare-xwindow-id':
+                    set_xid(w)
+            return gst.BUS_PASS
+
+        # create our pipeline, and connect our bus_handler
+        self.pipeline = gst.Pipeline()
+        bus = self.pipeline.get_bus()
+        bus.set_sync_handler(bus_handler)
+
+        videosink = gst.element_factory_make("ximagesink", "sink")
+        videosink.set_property("force-aspect-ratio", True)
+        videosink.set_property("handle-expose", True)
+        scale = gst.element_factory_make("videoscale", "scale")
+        cspace = gst.element_factory_make("ffmpegcolorspace", "cspace")
+
+        audiosink = gst.element_factory_make("autoaudiosink")
+        audioconvert = gst.element_factory_make("audioconvert")
+
+        # pipeline looks like: ... ! cspace ! scale ! sink
+        #                      ... ! audioconvert ! autoaudiosink
+        self.pipeline.add(cspace, scale, videosink, audiosink,
+            audioconvert)
+        scale.link(videosink)
+        cspace.link(scale)
+        audioconvert.link(audiosink)
+        return (self.pipeline, (cspace, audioconvert))
+
+# if this file is being run directly, create the demo and run it
+if __name__ == '__main__':
+    AVDemo().run()
\ No newline at end of file
diff --git a/tests/audio_video_crossfade.py b/tests/audio_video_crossfade.py
new file mode 100644 (file)
index 0000000..d046ca1
--- /dev/null
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+
+"""A short Audio-Video example"""
+import gobject
+gobject.threads_init()
+import gst
+import pygtk
+pygtk.require("2.0")
+import gtk
+import sys
+import os
+from audio_video import AVDemo, create_decodebin
+
+class AVCrossfade(AVDemo):
+    """Base class implementing boring, boiler-plate code.
+    Sets up a basic gstreamer environment which includes:
+
+    * a window containing a drawing area and basic media controls
+    * a basic gstreamer pipeline using an ximagesink and an autoaudiosink
+    * connects the ximagesink to the window's drawing area
+
+    Derived classes need only override magic(), __name__,
+    and __usage__ to create new demos."""
+
+    __name__ = "AV Demo"
+    __usage__ = "python audio_video.py <filename>"
+    __def_win_size__ = (320, 240)
+
+    # this comment allows us to include only a portion of the file
+    # in the tutorial for this demo
+
+    def onPad(self, decoder, pad, target):
+        tpad = target.get_compatible_pad(pad)
+        if tpad:
+            pad.link(tpad)
+
+    def addVideoChain(self, pipeline, name, decoder, mixer):
+        alpha = gst.element_factory_make("alpha")
+        alpha.props.alpha = 1.0
+        videoscale = gst.element_factory_make("videoscale")
+        videorate = gst.element_factory_make("videorate")
+        colorspace = gst.element_factory_make("ffmpegcolorspace")
+        queue = gst.element_factory_make("queue")
+
+        pipeline.add(alpha, videoscale, videorate, colorspace, queue)
+        decoder.connect("pad-added", self.onPad, videorate)
+        videorate.link(videoscale)
+        videoscale.link(colorspace)
+        colorspace.link(queue)
+        queue.link(alpha)
+        alpha.link(mixer)
+
+        setattr(self, "alpha%s" % name, alpha)
+
+    def addAudioChain(self, pipeline, name, decoder, adder):
+        volume = gst.element_factory_make("volume")
+        volume.props.volume = 0.5
+        audioconvert = gst.element_factory_make("audioconvert")
+        audiorate = gst.element_factory_make("audioresample")
+        queue = gst.element_factory_make("queue")
+
+        pipeline.add(volume, audioconvert, audiorate, queue)
+        decoder.connect("pad-added", self.onPad, audioconvert)
+        audioconvert.link(audiorate)
+        audiorate.link(queue)
+        queue.link(volume)
+        volume.link(adder)
+
+        setattr(self, "vol%s" % name, volume)
+
+    def addSourceChain(self, pipeline, name, filename, mixer, adder):
+        src = gst.element_factory_make("filesrc")
+        src.props.location = filename
+        dcd = create_decodebin()
+
+        pipeline.add(src, dcd)
+        src.link(dcd)
+        self.addVideoChain(pipeline, name, dcd, mixer)
+        self.addAudioChain(pipeline, name, dcd, adder)
+
+    def magic(self, pipeline, (videosink, audiosink), args):
+        """This is where the magic happens"""
+        mixer = gst.element_factory_make("videomixer")
+        adder = gst.element_factory_make("adder")
+        pipeline.add(mixer, adder)
+
+        mixer.link(videosink)
+        adder.link(audiosink)
+        self.addSourceChain(pipeline, "A", args[0], mixer, adder)
+        self.addSourceChain(pipeline, "B", args[1], mixer, adder)
+        self.alphaB.props.alpha = 0.5
+
+    def onValueChanged(self, adjustment):
+        balance = self.balance.get_value()
+        crossfade = self.crossfade.get_value()
+        self.volA.props.volume = (2 - balance) * (1 - crossfade)
+        self.volB.props.volume = balance * crossfade
+        self.alphaB.props.alpha = crossfade
+
+    def customWidgets(self):
+        self.crossfade = gtk.Adjustment(0.5, 0, 1.0)
+        self.balance = gtk.Adjustment(1.0, 0.0, 2.0)
+        crossfadeslider = gtk.HScale(self.crossfade)
+        balanceslider = gtk.HScale(self.balance)
+        self.crossfade.connect("value-changed", self.onValueChanged)
+        self.balance.connect("value-changed", self.onValueChanged)
+
+        ret = gtk.Table()
+        ret.attach(gtk.Label("Crossfade"), 0, 1, 0, 1)
+        ret.attach(crossfadeslider, 1, 2, 0, 1)
+        ret.attach(gtk.Label("Balance"), 0, 1, 1, 2)
+        ret.attach(balanceslider, 1, 2, 1, 2)
+        return ret
+
+# if this file is being run directly, create the demo and run it
+if __name__ == '__main__':
+    AVCrossfade().run()
\ No newline at end of file
diff --git a/tests/control_mixer.py b/tests/control_mixer.py
new file mode 100644 (file)
index 0000000..d0fcb9e
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+
+p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
+      videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
+      videomixer name=mix1 ! mix0.sink_1
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix1.sink_0
+      videomixer name=mix2 ! mix1.sink_1
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_0
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_1
+""")
+
+m1 = p.get_by_name ("mix1")
+s1_0 = m1.get_pad ("sink_0")
+s1_0.set_property ("xpos", 100)
+s1_1 = m1.get_pad ("sink_1")
+s1_1.set_property ("xpos", 250)
+
+m2 = p.get_by_name ("mix2")
+s2_0 = m2.get_pad ("sink_0")
+s2_0.set_property ("xpos", 200)
+s2_1 = m2.get_pad ("sink_1")
+s2_1.set_property ("xpos", 250)
+
+c1_0 = gst.Controller(s1_0, "ypos", "alpha")
+c1_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c1_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c1_0.set("ypos", 0, 0)
+c1_0.set("ypos", 5 * gst.SECOND, 200)
+c1_0.set("alpha", 0, 0)
+c1_0.set("alpha", 5 * gst.SECOND, 1.0)
+
+c1_1 = gst.Controller(s1_1, "ypos", "alpha")
+c1_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c1_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c1_1.set("ypos", 0, 0)
+c1_1.set("ypos", 5 * gst.SECOND, 200)
+c1_1.set("alpha", 0, 0)
+c1_1.set("alpha", 5 * gst.SECOND, 1.0)
+
+c2_0 = gst.Controller(s2_0, "ypos", "alpha")
+c2_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c2_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c2_0.set("ypos", 0, 0)
+c2_0.set("ypos", 5 * gst.SECOND, 200)
+c2_0.set("alpha", 0, 0)
+c2_0.set("alpha", 5 * gst.SECOND, 1.0)
+
+c2_1 = gst.Controller(s2_1, "ypos", "alpha")
+c2_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c2_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c2_1.set("ypos", 0, 0)
+c2_1.set("ypos", 5 * gst.SECOND, 200)
+c2_1.set("alpha", 0, 0)
+c2_1.set("alpha", 5 * gst.SECOND, 1.0)
+
+p.set_state (gst.STATE_PLAYING)
+
+gobject.MainLoop().run()
diff --git a/tests/control_mixer_osc.py b/tests/control_mixer_osc.py
new file mode 100644 (file)
index 0000000..379985e
--- /dev/null
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def add_method(self, path, type, method):
+        self.server.add_method(path, type, method)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
+        self.framerate = framerate
+        self.width = width
+        self.height = height
+        self.xpos = xpos
+        self.ypos = ypos
+        if not pipe:
+            pipe = 'videotestsrc pattern="snow"'
+        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+                        % (self.framerate, str(self.width), str(self.height))
+
+class GSTMixer(object):
+
+    def __init__(self, osc_port=13000):
+        self.name = 'mixer'
+        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+        self.srcs = []
+        self.i= 0
+
+        self.osc_port = osc_port
+        self.osc = OSCController(self.osc_port)
+
+    def osc_callback(self, path, value):
+        paths = path.split('/')
+        sink = paths[1]
+        param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+    def add_src(self, src):
+        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+        self.i += 1
+
+    def setup(self):
+        self.srcs.reverse()
+
+        for src in self.srcs:
+            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+        print ' '.join(self.pipe)
+        self.process = gst.parse_launch(' '.join(self.pipe))
+        mixer = self.process.get_by_name("mixer")
+
+        for src in self.srcs:
+            src['pad'] = mixer.get_pad(src['sink'])
+            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+            self.osc.add_method('/'+src['sink']+'/xpos', 'i', self.osc_callback)
+
+            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+            self.osc.add_method('/'+src['sink']+'/ypos', 'i', self.osc_callback)
+
+            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+            self.osc.add_method('/'+src['sink']+'/alpha', 'f', self.osc_callback)
+
+
+    def run(self):
+        self.osc.start()
+        self.process.set_state(gst.STATE_PLAYING)
+        gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc')
+    src2 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=100, ypos=50)
+    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+    mixer = GSTMixer()
+    mixer.add_src(src1)
+    mixer.add_src(src2)
+    mixer.add_src(src3)
+    mixer.add_src(src4)
+    mixer.setup()
+    mixer.run()
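Note: GSTMixer above registers one OSC handler per videomixer pad, on port 13000 by default: /sink_N/xpos and /sink_N/ypos expect an integer, /sink_N/alpha a float, and each received value is written as a control point on that pad's gst.Controller. A small pyliblo client to reposition and fade the second source (a sketch; the port, paths and example values simply follow the defaults used above) might be:

    import liblo

    port = 13000  # default osc_port of GSTMixer above
    liblo.send(port, '/sink_1/xpos', 300)   # handled by osc_callback, param 'xpos'
    liblo.send(port, '/sink_1/ypos', 120)   # handled by osc_callback, param 'ypos'
    liblo.send(port, '/sink_1/alpha', 0.5)  # float argument for the 'f' handler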
diff --git a/tests/control_mixer_osc_touch.py b/tests/control_mixer_osc_touch.py
new file mode 100644 (file)
index 0000000..8c709d1
--- /dev/null
@@ -0,0 +1,125 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def add_method(self, path, type, method):
+        self.server.add_method(path, type, method)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
+        self.framerate = framerate
+        self.width = width
+        self.height = height
+        self.xpos = xpos
+        self.ypos = ypos
+        if not pipe:
+            pipe = 'videotestsrc pattern="snow"'
+        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+                        % (self.framerate, str(self.width), str(self.height))
+
+class GSTMixer(object):
+
+    def __init__(self, osc_port=8338):
+        self.name = 'mixer'
+        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+        self.srcs = []
+        self.i= 0
+
+        self.osc_port = osc_port
+        self.osc = OSCController(self.osc_port)
+
+    def osc_callback(self, path, value):
+        paths = path.split('/')
+        sink = paths[1]
+        param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+    def osc_alpha_callback(self, path, value):
+        paths = path.split('/')
+        layer = paths[1]
+        param = paths[2]
+        id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+    def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+    def add_src(self, src):
+        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+        self.i += 1
+
+    def setup(self):
+        self.srcs.reverse()
+
+        for src in self.srcs:
+            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+        print ' '.join(self.pipe)
+        self.process = gst.parse_launch(' '.join(self.pipe))
+        mixer = self.process.get_by_name("mixer")
+
+        for src in self.srcs:
+            src['pad'] = mixer.get_pad(src['sink'])
+            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+    def run(self):
+        self.osc.start()
+        self.process.set_state(gst.STATE_PLAYING)
+        gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
+    src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
+    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+    mixer = GSTMixer()
+    mixer.add_src(src1)
+    mixer.add_src(src2)
+    mixer.add_src(src3)
+    mixer.add_src(src4)
+    mixer.setup()
+    mixer.run()
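Note: this variant maps a TouchOSC-style layout instead of per-sink paths: /1/fader1 … /1/fader4 (float) set the alpha of the source with the matching id, and /3/xy (two floats in 0..1) moves the source with id 2, scaled to 480x270 in osc_xy_callback. The same messages can be sent from a test script (a sketch using pyliblo; port 8338 is the default above and the values are only example inputs):

    import liblo

    port = 8338  # default osc_port of GSTMixer above
    liblo.send(port, '/1/fader2', 0.25)   # alpha of the source whose id is 1
    liblo.send(port, '/3/xy', 0.5, 0.5)   # mapped to xpos=0.5*480, ypos=0.5*270 for source id 2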
diff --git a/tests/control_mixer_osc_touch_1cam.py b/tests/control_mixer_osc_touch_1cam.py
new file mode 100644 (file)
index 0000000..0bfcaa6
--- /dev/null
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def add_method(self, path, type, method):
+        self.server.add_method(path, type, method)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+    def __init__(self, pipe=None, framerate='{30/1}', width=160, height=90, xpos=0, ypos=0):
+        self.framerate = framerate
+        self.width = width
+        self.height = height
+        self.xpos = xpos
+        self.ypos = ypos
+        if not pipe:
+            pipe = 'videotestsrc pattern="snow"'
+        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+                        % (self.framerate, str(self.width), str(self.height))
+
+
+class GSTWebmHttpStreamer(object):
+
+    def __init__(self, protocol='tcp', port=9000):
+        self.protocol = protocol
+        self.port = port
+
+
+class GSTMixer(object):
+
+    def __init__(self, osc_port=8338):
+        self.name = 'mixer'
+        self.pipe = ['videomixer name=mixer ! queue ! ffmpegcolorspace ! xvimagesink sync=false']
+        self.srcs = []
+        self.i= 0
+
+        self.osc_port = osc_port
+        self.osc = OSCController(self.osc_port)
+
+    def osc_callback(self, path, value):
+        paths = path.split('/')
+        sink = paths[1]
+        param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+    def osc_alpha_callback(self, path, value):
+        paths = path.split('/')
+        layer = paths[1]
+        param = paths[2]
+        id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+    def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+    def add_src(self, src):
+        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+        self.i += 1
+
+    def setup(self):
+        self.srcs.reverse()
+
+        for src in self.srcs:
+            self.pipe.append(' '.join([src['src'].pipe, '! queue ! ' + self.name + '.' + src['sink']]))
+
+        print ' '.join(self.pipe)
+        self.process = gst.parse_launch(' '.join(self.pipe))
+        mixer = self.process.get_by_name("mixer")
+
+        for src in self.srcs:
+            src['pad'] = mixer.get_pad(src['sink'])
+            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+    def run(self):
+        self.osc.start()
+        self.process.set_state(gst.STATE_PLAYING)
+        gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+    src1 = GSTSrcVideo(width=800, height=600, pipe='videotestsrc pattern="black"')
+    src2 = GSTSrcVideo(width=800, height=600, pipe='videotestsrc ')
+    src3 = GSTSrcVideo(width=640, height=480, xpos=200, ypos=150, pipe='v4l2src device=/dev/video0')
+    src4 = GSTSrcVideo(width=160, height=90, xpos=300, ypos=250)
+    mixer = GSTMixer()
+    mixer.add_src(src1)
+    mixer.add_src(src2)
+    mixer.add_src(src3)
+    mixer.add_src(src4)
+    mixer.setup()
+    mixer.run()
diff --git a/tests/control_mixer_osc_touch_2cam.py b/tests/control_mixer_osc_touch_2cam.py
new file mode 100644 (file)
index 0000000..56cbaff
--- /dev/null
@@ -0,0 +1,149 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def add_method(self, path, type, method):
+        self.server.add_method(path, type, method)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+    def __init__(self, pipe=None, framerate='{30/1}', width=160, height=90, xpos=0, ypos=0, queue_option=''):
+        self.framerate = framerate
+        self.width = width
+        self.height = height
+        self.xpos = xpos
+        self.ypos = ypos
+        self.queue_option = queue_option
+
+        if not pipe:
+            pipe = 'videotestsrc pattern="snow"'
+        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+                        % (self.framerate, str(self.width), str(self.height))
+
+class GSTWebmHttpEncoder(object):
+
+    def __init__(self, protocol='none', host='127.0.0.1', port=9000):
+        self.protocol = protocol
+        self.host, self.port = host, port
+        self.pipe = '! queue ! vp8enc speed=2 threads=4 quality=10.0 max-latency=25 max-keyframe-distance=30 auto-alt-ref-frames=true  ! queue ! muxout. \
+                    jackaudiosrc connect=2 ! audio/x-raw-float, channels=2 \
+                    ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
+                    webmmux streamable=true name=muxout \
+                    ! queue ! tcpserversink protocol=%s host=%s port=%s blocksize=65536 sync-method=1' \
+                    % (self.protocol, self.host, self.port)
+
+
+class GSTMixer(object):
+
+    def __init__(self, osc_port=8338, stream_port=9000):
+        self.name = 'mixer'
+        self.pipe = ['videomixer name=mixer ! queue ! ffmpegcolorspace ! xvimagesink sync=false']
+        self.srcs = []
+        self.i= 0
+
+        self.osc_port = osc_port
+        self.stream_port = stream_port
+        self.osc = OSCController(self.osc_port)
+        self.encoder = GSTWebmHttpEncoder(port=self.stream_port)
+
+    def osc_callback(self, path, value):
+        paths = path.split('/')
+        sink = paths[1]
+        param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+    def osc_alpha_callback(self, path, value):
+        paths = path.split('/')
+        layer = paths[1]
+        param = paths[2]
+        id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+    def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+    def add_src(self, src):
+        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+        self.i += 1
+
+    def setup(self):
+        self.srcs.reverse()
+
+        for src in self.srcs:
+            queue = 'queue'
+            if src['src'].queue_option:
+                # queue = 'timeoverlay ! queue'
+                queue += ' ' + src['src'].queue_option
+            self.pipe.append(' '.join([src['src'].pipe, '! ' + queue +  ' ! ' + self.name + '.' + src['sink']]))
+
+        self.pipe.append(self.encoder.pipe)
+
+        print ' '.join(self.pipe)
+        self.process = gst.parse_launch(' '.join(self.pipe))
+        mixer = self.process.get_by_name("mixer")
+
+        for src in self.srcs:
+            src['pad'] = mixer.get_pad(src['sink'])
+            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+    def run(self):
+        self.osc.start()
+        self.process.set_state(gst.STATE_PLAYING)
+        gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+    src1 = GSTSrcVideo(width=640, height=480, pipe='videotestsrc pattern="black"')
+    src4 = GSTSrcVideo(width=640, height=480, pipe='videotestsrc ')
+    src3 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='v4l2src device=/dev/video0 do-timestamp=true', queue_option='leaky=upstream min-threshold-time=10000000000')
+    src2 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='v4l2src device=/dev/video1 do-timestamp=true', queue_option='leaky=upstream min-threshold-time=10000000000')
+#    src2 = GSTSrcVideo(width=640, height=480, xpos=0, ypos=0, pipe='souphttpsrc location=http://192.168.0.15:8080/videofeed do-timestamp=true ! jpegdec ! queue ! ffmpegcolorspace ! videorate')
+    mixer = GSTMixer()
+    mixer.add_src(src1)
+    mixer.add_src(src2)
+    mixer.add_src(src3)
+    mixer.add_src(src4)
+    mixer.setup()
+    mixer.run()
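Note: GSTWebmHttpEncoder above is meant to push the mixed output as a WebM stream through a tcpserversink on port 9000, the same transport the deleted tcp2x.sh test consumed with gst-launch. A rough Python equivalent of that client (a sketch assuming the stream is served on localhost rather than the 192.168.0.18 host hard-coded in tcp2x.sh) would be:

    #!/usr/bin/python
    # Minimal WebM-over-TCP viewer, mirroring the deleted tcp2x.sh pipeline.
    import gobject; gobject.threads_init()
    import pygst; pygst.require("0.10")
    import gst

    player = gst.parse_launch(
        'tcpclientsrc host=127.0.0.1 port=9000 '
        '! matroskademux ! vp8dec ! ffmpegcolorspace ! ximagesink')
    player.set_state(gst.STATE_PLAYING)
    gobject.MainLoop().run()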
diff --git a/tests/control_mixer_osc_touch_3cams.py b/tests/control_mixer_osc_touch_3cams.py
new file mode 100644 (file)
index 0000000..ab60d18
--- /dev/null
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def add_method(self, path, type, method):
+        self.server.add_method(path, type, method)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
+        self.framerate = framerate
+        self.width = width
+        self.height = height
+        self.xpos = xpos
+        self.ypos = ypos
+        if not pipe:
+            pipe = 'videotestsrc pattern="snow"'
+        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+                        % (self.framerate, str(self.width), str(self.height))
+
+
+class GSTWebmHttpStreamer(object):
+
+    def __init__(self, protocol='tcp', port=9000):
+        self.protocol = protocol
+        self.port = port
+
+
+class GSTMixer(object):
+
+    def __init__(self, osc_port=8338):
+        self.name = 'mixer'
+        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+        self.srcs = []
+        self.i= 0
+
+        self.osc_port = osc_port
+        self.osc = OSCController(self.osc_port)
+
+    def osc_callback(self, path, value):
+        paths = path.split('/')
+        sink = paths[1]
+        param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+    def osc_alpha_callback(self, path, value):
+        paths = path.split('/')
+        layer = paths[1]
+        param = paths[2]
+        id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+    def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+    def add_src(self, src):
+        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+        self.i += 1
+
+    def setup(self):
+        self.srcs.reverse()
+
+        for src in self.srcs:
+            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+        print ' '.join(self.pipe)
+        self.process = gst.parse_launch(' '.join(self.pipe))
+        mixer = self.process.get_by_name("mixer")
+
+        for src in self.srcs:
+            src['pad'] = mixer.get_pad(src['sink'])
+            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
+
+    def run(self):
+        self.osc.start()
+        self.process.set_state(gst.STATE_PLAYING)
+        gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
+    src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
+    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+    mixer = GSTMixer()
+    mixer.add_src(src1)
+    mixer.add_src(src2)
+    mixer.add_src(src3)
+    mixer.add_src(src4)
+    mixer.setup()
+    mixer.run()
diff --git a/tests/control_mixer_parallel.py b/tests/control_mixer_parallel.py
new file mode 100644 (file)
index 0000000..379985e
--- /dev/null
@@ -0,0 +1,108 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+from threading import Thread
+
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def add_method(self, path, type, method):
+        self.server.add_method(path, type, method)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+    def __init__(self, pipe=None, framerate='24/1', width=160, height=90, xpos=0, ypos=0):
+        self.framerate = framerate
+        self.width = width
+        self.height = height
+        self.xpos = xpos
+        self.ypos = ypos
+        if not pipe:
+            pipe = 'videotestsrc pattern="snow"'
+        self.pipe = pipe + ' ! video/x-raw-yuv, framerate=%s, width=%s, height=%s' \
+                        % (self.framerate, str(self.width), str(self.height))
+
+class GSTMixer(object):
+
+    def __init__(self, osc_port=13000):
+        self.name = 'mixer'
+        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+        self.srcs = []
+        self.i= 0
+
+        self.osc_port = osc_port
+        self.osc = OSCController(self.osc_port)
+
+    def osc_callback(self, path, value):
+        paths = path.split('/')
+        sink = paths[1]
+        param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+    def add_src(self, src):
+        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+        self.i += 1
+
+    def setup(self):
+        self.srcs.reverse()
+
+        for src in self.srcs:
+            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+        print ' '.join(self.pipe)
+        self.process = gst.parse_launch(' '.join(self.pipe))
+        mixer = self.process.get_by_name("mixer")
+
+        for src in self.srcs:
+            src['pad'] = mixer.get_pad(src['sink'])
+            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+            self.osc.add_method('/'+src['sink']+'/xpos', 'i', self.osc_callback)
+
+            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+            self.osc.add_method('/'+src['sink']+'/ypos', 'i', self.osc_callback)
+
+            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+            self.osc.add_method('/'+src['sink']+'/alpha', 'f', self.osc_callback)
+
+
+    def run(self):
+        self.osc.start()
+        self.process.set_state(gst.STATE_PLAYING)
+        gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc')
+    src2 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=100, ypos=50)
+    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+    mixer = GSTMixer()
+    mixer.add_src(src1)
+    mixer.add_src(src2)
+    mixer.add_src(src3)
+    mixer.add_src(src4)
+    mixer.setup()
+    mixer.run()
diff --git a/tests/control_mixer_parallel_no_effects.py b/tests/control_mixer_parallel_no_effects.py
new file mode 100644 (file)
index 0000000..771b229
--- /dev/null
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+
+p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_3
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_2
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix0.sink_1
+      videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
+""")
+
+m1 = p.get_by_name("mix0")
+
+s1_1 = m1.get_pad("sink_1")
+c1_1 = gst.Controller(s1_1, "xpos", "ypos", "alpha")
+c1_1.set("xpos", 0, 0)
+c1_1.set("ypos", 0, 0)
+c1_1.set("alpha", 0, 1.0)
+
+s1_2 = m1.get_pad("sink_2")
+c1_2 = gst.Controller(s1_2, "xpos", "ypos", "alpha")
+c1_2.set("xpos", 0, 200)
+c1_2.set("ypos", 0, 200)
+c1_2.set("alpha", 0, 1.0)
+
+s1_3 = m1.get_pad("sink_3")
+c1_3 = gst.Controller(s1_3, "xpos", "ypos", "alpha")
+c1_3.set("xpos", 0, 400)
+c1_3.set("ypos", 0, 0)
+c1_3.set("alpha", 0, 1.0)
+
+p.set_state(gst.STATE_PLAYING)
+
+gobject.MainLoop().run()
diff --git a/tests/control_mixer_pipes.py b/tests/control_mixer_pipes.py
new file mode 100644 (file)
index 0000000..d0fcb9e
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+
+p = gst.parse_launch ("""videomixer name=mix0 ! ffmpegcolorspace ! xvimagesink
+      videotestsrc ! video/x-raw-yuv, framerate=24/1, width=640, height=360 ! mix0.sink_0
+      videomixer name=mix1 ! mix0.sink_1
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix1.sink_0
+      videomixer name=mix2 ! mix1.sink_1
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_0
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=200, height=150 ! mix2.sink_1
+""")
+
+m1 = p.get_by_name ("mix1")
+s1_0 = m1.get_pad ("sink_0")
+s1_0.set_property ("xpos", 100)
+s1_1 = m1.get_pad ("sink_1")
+s1_1.set_property ("xpos", 250)
+
+m2 = p.get_by_name ("mix2")
+s2_0 = m2.get_pad ("sink_0")
+s2_0.set_property ("xpos", 200)
+s2_1 = m2.get_pad ("sink_1")
+s2_1.set_property ("xpos", 250)
+
+c1_0 = gst.Controller(s1_0, "ypos", "alpha")
+c1_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c1_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c1_0.set("ypos", 0, 0)
+c1_0.set("ypos", 5 * gst.SECOND, 200)
+c1_0.set("alpha", 0, 0)
+c1_0.set("alpha", 5 * gst.SECOND, 1.0)
+
+c1_1 = gst.Controller(s1_1, "ypos", "alpha")
+c1_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c1_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c1_1.set("ypos", 0, 0)
+c1_1.set("ypos", 5 * gst.SECOND, 200)
+c1_1.set("alpha", 0, 0)
+c1_1.set("alpha", 5 * gst.SECOND, 1.0)
+
+c2_0 = gst.Controller(s2_0, "ypos", "alpha")
+c2_0.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c2_0.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c2_0.set("ypos", 0, 0)
+c2_0.set("ypos", 5 * gst.SECOND, 200)
+c2_0.set("alpha", 0, 0)
+c2_0.set("alpha", 5 * gst.SECOND, 1.0)
+
+c2_1 = gst.Controller(s2_1, "ypos", "alpha")
+c2_1.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+c2_1.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+c2_1.set("ypos", 0, 0)
+c2_1.set("ypos", 5 * gst.SECOND, 200)
+c2_1.set("alpha", 0, 0)
+c2_1.set("alpha", 5 * gst.SECOND, 1.0)
+
+p.set_state (gst.STATE_PLAYING)
+
+gobject.MainLoop().run()
diff --git a/tests/crc.py b/tests/crc.py
new file mode 100644 (file)
index 0000000..578b526
--- /dev/null
@@ -0,0 +1,53 @@
+# This file is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+# 
+# This file is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+# 
+# You should have received a copy of the GNU General Public License
+# along with this file.  If not, see <http://www.gnu.org/licenses/>.
+
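+# Computes a CRC32 over the decoded raw audio of a file (default: test.flac),
+# using a decodebin ! appsink pipeline and pulling buffers in a loop.
+# Usage: python crc.py [path]
+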
+import sys
+
+import zlib
+
+import gst
+
+def main(path):
+    pipeline = gst.parse_launch('''
+        filesrc location="%s" !
+        decodebin ! audio/x-raw-int !
+        appsink name=sink sync=False''' % path)
+    sink = pipeline.get_by_name('sink')
+
+    pipeline.set_state(gst.STATE_PLAYING)
+    crc = 0
+
+    while True:
+        try:
+            buf = sink.emit('pull-buffer')
+        except SystemError, e:
+            # it's probably a bug that emitting 'pull-buffer' triggers a SystemError here
+            print 'SystemError', e
+            break
+
+        # should be coming from a CD
+        assert len(buf) % 4 == 0, "buffer is not a multiple of 4 bytes"
+        crc = zlib.crc32(buf, crc)
+
+    crc = crc % 2 ** 32
+    print "CRC: %08X" % crc
+
+
+path = 'test.flac'
+
+try:
+    path = sys.argv[1]
+except IndexError:
+    pass
+
+main(path)
diff --git a/tests/cross-fade.py b/tests/cross-fade.py
new file mode 100644 (file)
index 0000000..fb6a92a
--- /dev/null
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+"""Extends basic demo with a gnl composition"""
+import gobject
+gobject.threads_init()
+from demo import Demo, DemoException
+import gtk
+import gst
+import sys
+import os
+
+def create_decodebin():
+    try:
+        return gst.element_factory_make("decodebin2")
+    except:
+        return gst.element_factory_make("decodebin")
+
+class SimpleCrossfadeDemo(Demo):
+    __name__ = "Demo of crosfade  without using gnonlin"
+    __usage__ = '''python %s sourceA sourceB
+    live crossfading between two sources''' % sys.argv[0]
+    __def_win_size__ = (320, 420)
+
+    def magic(self, pipeline, sink, args):
+
+        def onPad(obj, pad, target):
+            sinkpad = target.get_compatible_pad(pad, pad.get_caps())
+            if sinkpad:
+                pad.link(sinkpad)
+            return True
+
+        assert len(sys.argv) == 3
+        assert os.path.exists(sys.argv[-1])
+        assert os.path.exists(sys.argv[-2])
+
+        # <excerpt 1>
+        src = gst.element_factory_make("filesrc")
+        src.set_property("location", sys.argv[-1])
+
+        srcAdecode = create_decodebin()
+        srcAconvert = gst.element_factory_make("ffmpegcolorspace")
+        srcAalpha = gst.element_factory_make("alpha")
+        srcAalpha.set_property("alpha", 1.0)
+
+        srcB = gst.element_factory_make("filesrc")
+        srcB.set_property("location", sys.argv[-2])
+        srcBdecode = create_decodebin()
+        srcBconvert = gst.element_factory_make("ffmpegcolorspace")
+        srcBalpha = gst.element_factory_make("alpha")
+        srcBalpha.set_property("alpha", 0.5)
+
+        mixer = gst.element_factory_make("videomixer")
+        mixer.set_property("background", "black")
+        # </excerpt>
+
+        # <excerpt 2>
+        pipeline.add(mixer)
+
+        pipeline.add(src, srcAdecode, srcAconvert, srcAalpha)
+        src.link(srcAdecode)
+        srcAdecode.connect("pad-added", onPad, srcAconvert)
+        srcAconvert.link(srcAalpha)
+        srcAalpha.link(mixer)
+
+        pipeline.add(srcB, srcBdecode, srcBconvert, srcBalpha)
+        srcB.link(srcBdecode)
+        srcBdecode.connect("pad-added", onPad, srcBconvert)
+        srcBconvert.link(srcBalpha)
+        srcBalpha.link(mixer)
+
+        mixer.link(sink)
+
+        # remember the alpha elements
+        self.srcBalpha = srcBalpha
+        # </excerpt>
+
+
+    # overriding from parent
+    def customWidgets(self):
+        """Create a control for each property in the videobalance
+        widget"""
+
+        # <excerpt 3>
+        # to be called a property value needs to change
+        def onValueChanged(widget):
+            if self.srcBalpha:
+                self.srcBalpha.set_property("alpha", widget.get_value())
+        # </excerpt>
+
+        lower = 0
+        upper = 1
+        default = 0.5
+
+        # create a place to hold our controls
+        controls = gtk.VBox()
+        labels = gtk.VBox()
+
+        widget = gtk.HScale(); label = gtk.Label("Crossfade")
+
+        # set appropriate attributes
+        widget.set_update_policy(gtk.UPDATE_CONTINUOUS)
+        widget.set_draw_value(True)
+        widget.set_range(lower, upper)
+        widget.set_value(default)
+
+        # connect to our signal handler, specifying the property
+        # to adjust
+        widget.connect("value-changed", onValueChanged)
+
+        # pack widget into box
+        controls.pack_start(widget, True, True)
+        labels.pack_start(label, True, False)
+
+        layout = gtk.HBox()
+        layout.pack_start(labels, False, False)
+        layout.pack_end(controls, True, True)
+        return layout
+
+if __name__ == '__main__':
+    SimpleCrossfadeDemo().run()
\ No newline at end of file
diff --git a/tests/cross-fade_2.py b/tests/cross-fade_2.py
new file mode 100644 (file)
index 0000000..c1fd340
--- /dev/null
@@ -0,0 +1,23 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+
+p = gst.parse_launch ("""videomixer name=mix ! ffmpegcolorspace ! xvimagesink
+      videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=10/1, width=200, height=150 ! mix.sink_0
+      videotestsrc ! video/x-raw-yuv, framerate=10/1, width=640, height=360 ! mix.sink_1
+""")
+
+m = p.get_by_name ("mix")
+s0 = m.get_pad ("sink_0")
+s0.set_property ("xpos", 100)
+
+control = gst.Controller(s0, "ypos", "alpha")
+control.set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+control.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+control.set("ypos", 0, 0); control.set("ypos", 5 * gst.SECOND, 200)
+control.set("alpha", 0, 0); control.set("alpha", 5 * gst.SECOND, 1.0)
+
+p.set_state (gst.STATE_PLAYING)
+
+gobject.MainLoop().run()
\ No newline at end of file
diff --git a/tests/demo.py b/tests/demo.py
new file mode 100644 (file)
index 0000000..51e95f3
--- /dev/null
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+"""Basic Framework for writing GStreamer Demos in Python"""
+#<excerpt 2>
+import gobject
+gobject.threads_init()
+import gst
+#</excerpt>
+import pygtk
+pygtk.require("2.0")
+import gtk
+gtk.gdk.threads_init()
+import sys
+import os
+
+
+class DemoException(Exception):
+    """Base exception class for errors which occur during demos"""
+
+    def __init__(self, reason):
+        self.reason = reason
+
+class Demo:
+    """Base class implementing boring, boiler-plate code.
+    Sets up a basic gstreamer environment which includes:
+
+    * a window containing a drawing area and basic media controls
+    * a basic gstreamer pipeline using an ximagesink
+    * connects the ximagesink to the window's drawing area
+
+    Derived classes need only override magic(), __name__,
+    and __usage__ to create new demos."""
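+
+    # Concrete subclasses elsewhere in this directory (tests/cross-fade.py,
+    # tests/simple-effect-gtk.py) override magic() and customWidgets().
+    # A minimal override sketch (hypothetical subclass, not part of this file):
+    #
+    #   class MyDemo(Demo):
+    #       def magic(self, pipeline, sink, args):
+    #           src = gst.element_factory_make("videotestsrc", "src")
+    #           pipeline.add(src)
+    #           src.link(sink)
+    #
+    #   if __name__ == '__main__':
+    #       MyDemo().run()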
+
+    __name__ = "Basic Demo"
+    __usage__ = "python demo.py -- runs a simple test demo"
+    __def_win_size__ = (320, 240)
+
+    # this comment allows us to include only a portion of the file
+    # in the tutorial for this demo
+    # <excerpt 1>     ...
+
+    def magic(self, pipeline, sink, args):
+        """This is where the magic happens"""
+        src = gst.element_factory_make("videotestsrc", "src")
+        pipeline.add(src)
+        src.link(sink)
+
+
+    def createPipeline(self, w):
+        """Given a window, creates a pipeline and connects it to the window"""
+
+        # code will make the ximagesink output in the specified window
+        def set_xid(window):
+            gtk.gdk.threads_enter()
+            sink.set_xwindow_id(window.window.xid)
+            sink.expose()
+            gtk.gdk.threads_leave()
+
+        # this code receives the messages from the pipeline. if we
+        # need to set X11 id, then we call set_xid
+        def bus_handler(unused_bus, message):
+            if message.type == gst.MESSAGE_ELEMENT:
+                if message.structure.get_name() == 'prepare-xwindow-id':
+                    set_xid(w)
+            return gst.BUS_PASS
+
+        # create our pipeline, and connect our bus_handler
+        self.pipeline = gst.Pipeline()
+        bus = self.pipeline.get_bus()
+        bus.set_sync_handler(bus_handler)
+
+        sink = gst.element_factory_make("ximagesink", "sink")
+        sink.set_property("force-aspect-ratio", True)
+        sink.set_property("handle-expose", True)
+        scale = gst.element_factory_make("videoscale", "scale")
+        cspace = gst.element_factory_make("ffmpegcolorspace", "cspace")
+
+        # our pipeline looks like this: ... ! cspace ! scale ! sink
+        self.pipeline.add(cspace, scale, sink)
+        scale.link(sink)
+        cspace.link(scale)
+        return (self.pipeline, cspace)
+
+    # ... end of excerpt </excerpt>
+
+    # subclasses can override this method to provide custom controls
+    def customWidgets(self):
+        return gtk.HBox()
+
+    def createWindow(self):
+        """Creates a top-level window, sets various boring attributes,
+        creates a place to put the video sink, adds some controls and finally
+        connects some basic signal handlers. Really, really boring.
+        """
+
+        # create window, set basic attributes
+        w = gtk.Window()
+        w.set_size_request(*self.__def_win_size__)
+        w.set_title("Gstreamer " + self.__name__)
+        w.connect("destroy", gtk.main_quit)
+
+        # declare buttons and their associated handlers
+        controls = (
+            ("play_button", gtk.ToolButton(gtk.STOCK_MEDIA_PLAY), self.onPlay),
+            ("stop_button", gtk.ToolButton(gtk.STOCK_MEDIA_STOP), self.onStop),
+            ("quit_button", gtk.ToolButton(gtk.STOCK_QUIT), gtk.main_quit)
+        )
+
+        # as well as the container in which to put them
+        box = gtk.HButtonBox()
+
+        # for every widget, connect to its clicked signal and add it
+        # to the enclosing box
+        for name, widget, handler in controls:
+            widget.connect("clicked", handler)
+            box.pack_start(widget, True)
+            setattr(self, name, widget)
+
+        viewer = gtk.DrawingArea()
+        viewer.modify_bg(gtk.STATE_NORMAL, viewer.style.black)
+
+        # we will need this later
+        self.xid = None
+
+        # now finally do the top-level layout for the window
+        layout = gtk.VBox(False)
+        layout.pack_start(viewer)
+
+        # subclasses can override childWidgets() to supply
+        # custom controls
+        layout.pack_start(self.customWidgets(), False, False)
+        layout.pack_end(box, False, False)
+        w.add(layout)
+        w.show_all()
+
+        # we want to return only the portion of the window which will
+        # be used to display the video, not the whole top-level
+        # window. a DrawingArea widget is, in fact, an X11 window.
+        return viewer
+
+    def onPlay(self, unused_button):
+        self.pipeline.set_state(gst.STATE_PLAYING)
+
+    def onStop(self, unused_button):
+        self.pipeline.set_state(gst.STATE_READY)
+
+    def run(self):
+        w = self.createWindow()
+        p, s = self.createPipeline(w)
+        try:
+            self.magic(p, s, sys.argv[1:])
+            gtk.main()
+        except DemoException, e:
+            print e.reason
+            print self.__usage__
+            sys.exit(-1)
+
+# if this file is being run directly, create the demo and run it
+if __name__ == '__main__':
+    Demo().run()
\ No newline at end of file
diff --git a/tests/ffmpeg_dv_jack_fifo_shout b/tests/ffmpeg_dv_jack_fifo_shout
new file mode 100755 (executable)
index 0000000..07075a1
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+set -e
+
+case "$1" in
+ start)
+   fifo=/tmp/video_fifo
+   if [ ! -e $fifo ]; then
+   mkfifo $fifo
+   fi
+
+   dir=/mnt/data1/video_tests
+   now=`date -R`
+
+   dvgrab -buffers 1 - | ffmpeg -f dv -i - -f jack -i ffmpeg -vcodec libtheora -s 480x320 -aspect 16:9 -acodec libvorbis -b 300k -f ogg -y $fifo -map 0.0 -map 1.0 &
+
+   sleep 5
+   jack_connect jack_rack:out_1 ffmpeg:input_1
+   jack_connect jack_rack:out_2 ffmpeg:input_2
+   #jack_connect jack_rack:out_1 ffmpeg:input_1
+   #jack_connect jack_rack:out_2 ffmpeg:input_2
+
+   sleep 1
+   cat $fifo | tee "$dir/Pre-Barreau_-_Augustins_-_Video_Live_-_$now.ogg" | oggfwd -d "pb_video_live" -g "Teaching"  -n "pb_video_live" localhost 8000 source2parisson /pb_video_live.ogg &
+   ;;
+ stop)
+   jack_disconnect jack_rack:out_1 ffmpeg:input_1
+   jack_disconnect jack_rack:out_2 ffmpeg:input_2
+   pkill ffmpeg
+   ;;
+esac
+
+
+
+
+
diff --git a/tests/ffmpeg_usb_jack_fifo_shout b/tests/ffmpeg_usb_jack_fifo_shout
new file mode 100755 (executable)
index 0000000..47fe2ed
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+set -e
+
+case "$1" in
+ start)
+   fifo=/tmp/video_fifo2
+   if [ ! -e $fifo ]; then
+   mkfifo $fifo
+   fi
+
+   dir=$HOME/archives/2011
+   now=`date -R`
+   file=$dir/video_test2.ogg
+
+   ffmpeg -f video4linux2 -i /dev/video0 -f jack -i ffmpeg -itsoffset 00:00:00.8 -r 20 -f ogg -vcodec libtheora -s 320x240 -b 380k -acodec libvorbis -ab 64k -ar 44100 -ac 1 -y $file -map 0.0 -map 1.0 &
+
+   sleep 3
+   jack_connect jack_rack:out_1 ffmpeg:input_1
+   #jack_connect jack_rack:out_1 ffmpeg:input_2
+
+   sleep 2
+   cat $file | tee $file.ogg | oggfwd -d "TeleCaster Live Video Services" -g "Vocal"  -n "TeleCaster Live Video" localhost 8000 source2parisson /telecaster_live_video.ogg &
+   ;;
+ stop)
+   jack_disconnect jack_rack:out_1 ffmpeg:input_1
+#   jack_disconnect jack_rack:out_1 ffmpeg:input_2
+   pkill -9 ffmpeg
+   ;;
+esac
+
+
+
+
+
+# http://www.kkoncepts.net/node/69
\ No newline at end of file
diff --git a/tests/gst_dv_jack_shout b/tests/gst_dv_jack_shout
new file mode 100755 (executable)
index 0000000..ef59dfc
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+gst-launch-0.10 dv1394src ! queue ! dvdemux name=d ! queue ! dvdec  \
+       ! queue ! videoscale ! video/x-raw-yuv, width=480, height=368 \
+       ! queue ! ffmpegcolorspace ! theoraenc bitrate=500 ! muxout. \
+        oggmux name=muxout \
+       ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
diff --git a/tests/gst_osc_multi.py b/tests/gst_osc_multi.py
new file mode 100755 (executable)
index 0000000..d47e17f
--- /dev/null
@@ -0,0 +1,175 @@
+#!/usr/bin/python
+import gobject; gobject.threads_init()
+import pygst; pygst.require("0.10")
+import gst
+import os
+from threading import Thread
+
+
+class OSCController(Thread):
+
+    def __init__(self, port):
+        Thread.__init__(self)
+        import liblo
+        self.port = port
+        try:
+            self.server = liblo.Server(self.port)
+        except liblo.ServerError, err:
+            print str(err)
+
+    def add_method(self, path, type, method):
+        self.server.add_method(path, type, method)
+
+    def run(self):
+        while True:
+            self.server.recv(100)
+
+
+class GSTSrcVideo(object):
+
+    def __init__(self, pipe=None, mime_type='video/x-raw-yuv', framerate='24/1',
+                 width=160, height=90, xpos=0, ypos=0):
+        self.mime_type = mime_type
+        self.framerate = framerate
+        self.width = width
+        self.height = height
+        self.xpos = xpos
+        self.ypos = ypos
+        if not pipe:
+            pipe = 'videotestsrc pattern="snow"'
+        self.pipe = pipe + ' ! %s, framerate=%s, width=%s, height=%s' \
+                        % (self.mime_type, self.framerate, str(self.width), str(self.height))
+
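+# With the defaults, GSTSrcVideo().pipe yields:
+#   videotestsrc pattern="snow" ! video/x-raw-yuv, framerate=24/1, width=160, height=90
+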
+
+class GSTSrcAudio(object):
+
+    def __init__(self, pipe=None, mime_type='audio/x-raw-float', channels=2):
+        self.mime_type = mime_type
+        self.channels = channels
+        if not pipe:
+            pipe = 'jackaudiosrc connect=2'
+        self.pipe = pipe + ' ! %s, channels=%s' % (self.mime_type, str(self.channels))
+
+
+class V4lControl(object):
+
+    def __init__(self, device=0):
+        self.program = 'v4l2-ctl'
+        self.device = device
+
+    def execute(self, args):
+        command = ' '.join([self.program, '-d', str(self.device), '-c', args])
+        os.system(command)
+
+    def power_line_frequency(self, value):
+        arg = 'power_line_frequency=' + value
+        self.execute(arg)
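+
+    # Usage sketch (hypothetical values):
+    #   V4lControl(0).power_line_frequency('1')
+    # runs: v4l2-ctl -d 0 -c power_line_frequency=1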
+
+
+class GSTWebmStreamer(object):
+
+    def __init__(self, host='127.0.0.1', port=9000, blocksize=65536):
+        self.host = host
+        self.port = port
+        self.blocksize = blocksize
+        self.muxer = """webmmux streamable=true name=muxer \
+                    ! queue ! tcpserversink host=%s port=%s protocol=none blocksize=%s sync-method=1
+                    """ % (self.host, str(self.port), str(self.blocksize))
+
+    def video_setup(self, threads=4, quality=10):
+        self.video = """! queue ! ffmpegcolorspace ! queue ! vp8enc speed=2 threads=%s quality=%s \
+                        max-latency=25 max-keyframe-distance=96 auto-alt-ref-frames=true  \
+                        ! queue ! muxer.""" % (str(threads), str(quality))
+
+    def audio_setup(self, quality=0.3):
+        self.audio = "! queue ! audioconvert ! queue ! vorbisenc quality=%s ! queue ! muxer." % str(quality)
+
+    @property
+    def pipe(self):
+        return ' '.join([self.video, self.audio, self.muxer])
+
+
+class GSTMixer(object):
+
+    def __init__(self, osc_port=8338):
+        self.name = 'mixer'
+        self.pipe = ['videomixer name=mixer ! ffmpegcolorspace ! xvimagesink']
+        self.srcs = []
+        self.i = 0
+        self.osc_port = osc_port
+        self.osc = OSCController(self.osc_port)
+
+    def osc_callback(self, path, value):
+        paths = path.split('/')
+        sink = paths[1]
+        param = paths[2]
+        for src in self.srcs:
+            if src['sink'] == sink:
+                break
+        src['control'].set(param, 5 * gst.SECOND, value[0])
+
+    def osc_alpha_callback(self, path, value):
+        paths = path.split('/')
+        layer = paths[1]
+        param = paths[2]
+        id = int(param[-1])-1
+        for src in self.srcs:
+            if src['id'] == id:
+                break
+        src['control'].set('alpha', 5 * gst.SECOND, value[0])
+
+    def osc_xy_callback(self, path, value):
+        for src in self.srcs:
+            if src['id'] == 2:
+                break
+        src['control'].set("xpos", 5 * gst.SECOND, int(value[0]*480))
+        src['control'].set("ypos", 5 * gst.SECOND, int(value[1]*270))
+
+    def add_src(self, src):
+        self.srcs.append({'id': self.i, 'src': src, 'sink': 'sink_' + str(self.i)})
+        self.i += 1
+
+    def setup(self):
+        self.srcs.reverse()
+
+        for src in self.srcs:
+            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))
+
+        print ' '.join(self.pipe)
+        self.process = gst.parse_launch(' '.join(self.pipe))
+        mixer = self.process.get_by_name("mixer")
+
+        for src in self.srcs:
+            src['pad'] = mixer.get_pad(src['sink'])
+            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")
+
+            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
+
+            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
+            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
+
+            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
+            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
+
+            self.osc.add_method('/1/fader'+str(src['id']+1), 'f', self.osc_alpha_callback)
+
+        self.osc.add_method('/3/xy', 'ff', self.osc_xy_callback)
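+
+        # OSC control surface (TouchOSC-style paths):
+        #   /1/fader<n> (float) -> alpha of source n
+        #   /3/xy       (ff)    -> x/y position of the source with id 2, scaled to 480x270
+        # Sender sketch with pyliblo (8338 is the default osc_port):
+        #   import liblo
+        #   liblo.send(liblo.Address(8338), "/1/fader2", 0.5)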
+
+    def run(self):
+        self.osc.start()
+        self.process.set_state(gst.STATE_PLAYING)
+        gobject.MainLoop().run()
+
+
+if __name__ == '__main__':
+    src1 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc pattern="black" ')
+    src2 = GSTSrcVideo(width=640, height=360, framerate='24/1', pipe='videotestsrc ')
+    src3 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=200, ypos=150)
+    src4 = GSTSrcVideo(width=160, height=90, framerate='24/1', xpos=300, ypos=250)
+    mixer = GSTMixer()
+    mixer.add_src(src1)
+    mixer.add_src(src2)
+    mixer.add_src(src3)
+    mixer.add_src(src4)
+    mixer.setup()
+    mixer.run()
diff --git a/tests/gst_video_double_shout b/tests/gst_video_double_shout
new file mode 100755 (executable)
index 0000000..246d01e
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+gst-launch v4l2src device=/dev/video0 ! videoscale ! video/x-raw-yuv, width=160, height=120 \
+       ! videomixer name=mix sink_1::xpos=20 sink_1::ypos=20 sink_1::alpha=0.9 \
+       ! queue ! ffmpegcolorspace ! theoraenc quality=40 ! muxout. \
+       jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 \
+       ! queue ! audioconvert ! vorbisenc ! queue ! muxout. \
+       oggmux name=muxout ! tee name=t ! queue ! filesink location="video_test.ogg" \
+       t. ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
+       v4l2src device=/dev/video1 ! videoscale ! video/x-raw-yuv, width=480, height=270 ! mix.
diff --git a/tests/gst_video_double_shout2.sh b/tests/gst_video_double_shout2.sh
new file mode 100755 (executable)
index 0000000..cd25b51
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=480
+HEIGHT=270
+
+gst-launch v4l2src device=/dev/video0 ! queue ! videoscale ! video/x-raw-yuv, width=160, height=120 \
+       ! queue ! videorate ! video/x-raw-yuv,framerate=25/1 \
+       ! queue ! videomixer name=mix sink_1::xpos=0 sink_1::ypos=0 sink_1::alpha=0.9 \
+       ! queue ! ffmpegcolorspace ! queue ! theoraenc quality=25 ! muxout. \
+       jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 \
+       ! queue ! audioconvert ! vorbisenc ! queue ! muxout.  \
+       oggmux name=muxout ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
+       v4l2src device=/dev/video1 ! queue ! videoscale ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT \
+       ! queue ! videorate ! video/x-raw-yuv,framerate=25/1 ! mix. \
+       > /dev/null &
+               
+sleep 2
+
+jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
+jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
+
diff --git a/tests/gst_video_jack_fifo_shout b/tests/gst_video_jack_fifo_shout
new file mode 100755 (executable)
index 0000000..1c19e27
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -e
+
+case "$1" in
+ start)
+   dir=/home/$USER/trash
+   dat=`date '+%y-%m-%d-%H:%M:%S'`
+   file=$dir/video_test_$dat.ogg
+
+   gst-launch-0.10 v4l2src device=/dev/video0 ! queue ! videorate ! video/x-raw-yuv,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! filesink location=$file sync=true &
+   
+#    gst-launch v4l2src ! queue ! videorate ! video/x-raw-yuv,fps=30,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 &
+   
+#    
+    sleep 10
+    cat $file | oggfwd -d "TeleCaster Live Video Services" -g "Vocal"  -n "TeleCaster Live Video" localhost 8000 source2parisson /telecaster_live_video.ogg &
+   ;;
+ stop)
+   pkill -9 oggfwd
+   pkill -9 gst-launch-0.10
+   ;;
+esac
diff --git a/tests/gst_video_jack_shout b/tests/gst_video_jack_shout
new file mode 100755 (executable)
index 0000000..f5d25ba
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+set -e
+
+case "$1" in
+ start)
+   dir=/home/$USER/trash
+   dat=`date '+%y-%m-%d-%H:%M:%S'`
+   file=$dir/video_test_$dat.ogg
+
+#   gst-launch-0.10 v4l2src device=/dev/video0 ! queue ! videorate ! video/x-raw-yuv,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! filesink location=$file sync=true &
+   
+    gst-launch-0.10 v4l2src ! queue ! videorate ! video/x-raw-yuv,fps=30,width=320 ! queue  ! theoraenc quality=60 ! queue ! muxout. jackaudiosrc connect=1 ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16 ! queue ! audioconvert ! vorbisenc ! queue ! muxout. oggmux name=muxout ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 &
+   
+#    
+#    sleep 10
+#    cat $file | oggfwd -d "TeleCaster Live Video Services" -g "Vocal"  -n "TeleCaster Live Video" localhost 8000 source2parisson /telecaster_live_video.ogg &
+   ;;
+ stop)
+   pkill -9 oggfwd
+   pkill -9 gst-launch-0.10
+   ;;
+esac
diff --git a/tests/gst_video_simple_ogg b/tests/gst_video_simple_ogg
new file mode 100755 (executable)
index 0000000..af63af4
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv,width=640, height=480 \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! theoraenc quality=20 ! mux. \
+       alsasrc device=hw:0,0 ! audio/x-raw-int,rate=44100,channels=2,depth=16 \
+       !  queue ! audioconvert ! audio/x-raw-float,rate=44100,channels=2,depth=16 ! queue ! vorbisenc ! mux. \
+       oggmux name=mux ! filesink location=/var/www/test/test.ogg \
+
+# ! queue ! videorate ! video/x-raw-yuv, framerate=25/1 \
+#jackaudiosrc connect=1 ! queue ! audioconvert ! audio/x-raw-int,rate=44100,channels=1,width=16
\ No newline at end of file
diff --git a/tests/gst_video_simple_ogg_jack b/tests/gst_video_simple_ogg_jack
new file mode 100755 (executable)
index 0000000..69d2091
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv,width=640, height=480 \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! theoraenc quality=25 ! mux. \
+       jackaudiosrc connect=1 ! queue ! audioconvert ! queue ! vorbisenc ! mux. \
+       oggmux name=mux ! filesink location=/var/www/test/test.ogg \
+
+# ! queue ! videorate ! video/x-raw-yuv, framerate=25/1 \
diff --git a/tests/gst_video_triple_shout b/tests/gst_video_triple_shout
new file mode 100755 (executable)
index 0000000..d6aeec8
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+gst-launch v4l2src device=/dev/video0 ! queue ! videoscale ! video/x-raw-yuv, width=160, height=120 \
+       ! queue ! ffmpegcolorspace ! video/x-raw-rgb \
+       ! queue ! videomixer name=mix \
+               sink_1::xpos=40 sink_1::ypos=60 sink_1::alpha=0.9 sink_2::xpos=40 sink_2::ypos=180 sink_2::alpha=0.9 \
+       ! queue ! videoscale ! video/x-raw-rgb, width=480, height=270 \
+       ! queue ! ffmpegcolorspace ! theoraenc quality=10 ! oggmux name=muxout \
+       ! queue ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
+       ximagesrc ! queue ! videorate ! video/x-raw-rgb, framerate=30/1 ! videoscale ! video/x-raw-rgb, width=160, height=120 ! mix. \
+    v4l2src device=/dev/video1 ! queue ! ffmpegcolorspace ! video/x-raw-rgb ! videoscale ! video/x-raw-rgb, width=640, height=360 ! mix. \
\ No newline at end of file
diff --git a/tests/gtk_sink_pad.py b/tests/gtk_sink_pad.py
new file mode 100644 (file)
index 0000000..0b0c53f
--- /dev/null
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+
+import sys, os
+import pygtk, gtk, gobject
+import pygst
+pygst.require("0.10")
+import gst
+
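+# Minimal Ogg/Vorbis player: type a file path into the entry and press Start.
+# The oggdemux "pad-added" callback links the demuxer to the Vorbis decoder at
+# runtime, once the audio pad appears.
+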
+class GTK_Main:
+
+       def __init__(self):
+               window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+               window.set_title("Vorbis-Player")
+               window.set_default_size(500, 200)
+               window.connect("destroy", gtk.main_quit, "WM destroy")
+               vbox = gtk.VBox()
+               window.add(vbox)
+               self.entry = gtk.Entry()
+               vbox.pack_start(self.entry, False)
+               self.button = gtk.Button("Start")
+               vbox.add(self.button)
+               self.button.connect("clicked", self.start_stop)
+               window.show_all()
+
+               self.player = gst.Pipeline("player")
+               source = gst.element_factory_make("filesrc", "file-source")
+               demuxer = gst.element_factory_make("oggdemux", "demuxer")
+               demuxer.connect("pad-added", self.demuxer_callback)
+               self.audio_decoder = gst.element_factory_make("vorbisdec", "vorbis-decoder")
+               audioconv = gst.element_factory_make("audioconvert", "converter")
+               audiosink = gst.element_factory_make("autoaudiosink", "audio-output")
+
+               self.player.add(source, demuxer, self.audio_decoder, audioconv, audiosink)
+               gst.element_link_many(source, demuxer)
+               gst.element_link_many(self.audio_decoder, audioconv, audiosink)
+
+               bus = self.player.get_bus()
+               bus.add_signal_watch()
+               bus.connect("message", self.on_message)
+
+       def start_stop(self, w):
+               if self.button.get_label() == "Start":
+                       filepath = self.entry.get_text()
+                       if os.path.isfile(filepath):
+                               self.button.set_label("Stop")
+                               self.player.get_by_name("file-source").set_property("location", filepath)
+                               self.player.set_state(gst.STATE_PLAYING)
+               else:
+                       self.player.set_state(gst.STATE_NULL)
+                       self.button.set_label("Start")
+
+       def on_message(self, bus, message):
+               t = message.type
+               if t == gst.MESSAGE_EOS:
+                       self.player.set_state(gst.STATE_NULL)
+                       self.button.set_label("Start")
+               elif t == gst.MESSAGE_ERROR:
+                       err, debug = message.parse_error()
+                       print "Error: %s" % err, debug
+                       self.player.set_state(gst.STATE_NULL)
+                       self.button.set_label("Start")
+
+       def demuxer_callback(self, demuxer, pad):
+               adec_pad = self.audio_decoder.get_pad("sink")
+               pad.link(adec_pad)
+
+gtk.gdk.threads_init()
+GTK_Main()
+gtk.main()
\ No newline at end of file
diff --git a/tests/osc_play.py b/tests/osc_play.py
new file mode 100644 (file)
index 0000000..76e4922
--- /dev/null
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+# send all messages to port 12345 on the local machine
+try:
+    target = liblo.Address(12345)
+except liblo.AddressError, err:
+    print str(err)
+    sys.exit()
+
+# send message "/foo/message1" with int, float and string arguments
+liblo.send(target, "/play", 1)
diff --git a/tests/osc_record_start.py b/tests/osc_record_start.py
new file mode 100755 (executable)
index 0000000..12356a4
--- /dev/null
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+port = int(sys.argv[-1])
+
+# send all messages to the port given on the command line, on the local machine
+try:
+    target = liblo.Address(port)
+except liblo.AddressError, err:
+    print str(err)
+    sys.exit()
+
+# send message "/foo/message1" with int, float and string arguments
+liblo.send(target, "/record", 1)
diff --git a/tests/osc_record_stop.py b/tests/osc_record_stop.py
new file mode 100755 (executable)
index 0000000..81da1a1
--- /dev/null
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+port = int(sys.argv[-1])
+
+# send all messages to the port given on the command line, on the local machine
+try:
+    target = liblo.Address(port)
+except liblo.AddressError, err:
+    print str(err)
+    sys.exit()
+
+# send message "/foo/message1" with int, float and string arguments
+liblo.send(target, "/record", 0)
diff --git a/tests/osc_stop.py b/tests/osc_stop.py
new file mode 100644 (file)
index 0000000..cce3314
--- /dev/null
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+# send all messages to port 12345 on the local machine
+try:
+    target = liblo.Address(12345)
+except liblo.AddressError, err:
+    print str(err)
+    sys.exit()
+
+# send message "/foo/message1" with int, float and string arguments
+liblo.send(target, "/play", 0)
diff --git a/tests/osc_test.py b/tests/osc_test.py
new file mode 100644 (file)
index 0000000..0f4337e
--- /dev/null
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import liblo, sys
+
+# send all messages to port 13000 on the local machine
+try:
+    target = liblo.Address(13000)
+except liblo.AddressError, err:
+    print str(err)
+    sys.exit()
+
+# send message "/foo/message1" with int, float and string arguments
+liblo.send(target, "/sink_2/xpos", 200)
diff --git a/tests/playbin.py b/tests/playbin.py
new file mode 100644 (file)
index 0000000..32051e1
--- /dev/null
@@ -0,0 +1,17 @@
+import pygst 
+pygst.require("0.10") 
+import gst 
+
+#pipeline = gst.Pipeline() 
+playbin = gst.element_factory_make("playbin2", 'player') 
+#sink = gst.element_factory_make("autoaudiosink", None) 
+
+playbin.set_property("uri", "/home/momo/music_local/test/sweep.wav")
+#playbin.set_property("uri", "/home/momo/video_local/webm/ocean-clip.webm")
+#playbin.set_property("audio-sink", sink)
+
+#pipeline.add(playbin) 
+
+import time 
+playbin.set_state(gst.STATE_PLAYING) 
+time.sleep(200) 
diff --git a/tests/radiodelay.py b/tests/radiodelay.py
new file mode 100644 (file)
index 0000000..39fc21c
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/python
+
+import pygst
+pygst.require("0.10")
+import gst
+import pygtk
+import gtk
+import sys
+
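+# Usage: python radiodelay.py [delay-in-seconds]
+# Captures audio from ALSA and plays it back delayed, by holding it in a queue
+# whose min-threshold-time is set to the requested delay (0 if omitted).
+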
+class Main:
+    def __init__(self):
+        # this just reads the command line args
+        try:
+            DELAY = float(sys.argv[1])
+            DELAY = long(DELAY * 1000000000)
+            print DELAY
+        except IndexError:
+            DELAY = 0
+
+        self.delay_pipeline = gst.Pipeline("mypipeline")
+        # ALSA capture source
+        self.audiosrc = gst.element_factory_make("alsasrc", "audio")
+        self.audiosrc.set_property("device", "default")
+        self.delay_pipeline.add(self.audiosrc)
+        # queue acting as the delay buffer
+        self.audioqueue = gst.element_factory_make("queue", "queue1")
+        self.audioqueue.set_property("max-size-time", 0)
+        self.audioqueue.set_property("max-size-buffers", 0)
+        self.audioqueue.set_property("max-size-bytes", 0)
+        self.audioqueue.set_property("min-threshold-time", DELAY)
+        self.audioqueue.set_property("leaky", "no")
+        self.delay_pipeline.add(self.audioqueue)
+        # audio output
+        self.sink = gst.element_factory_make("autoaudiosink", "sink")
+        self.delay_pipeline.add(self.sink)
+        # link the elements
+        self.audiosrc.link(self.audioqueue)
+        self.audioqueue.link(self.sink)
+        # begin playing
+        self.delay_pipeline.set_state(gst.STATE_PLAYING)
+
+start=Main()
+gtk.main()
+
diff --git a/tests/rtpx264.sh b/tests/rtpx264.sh
new file mode 100755 (executable)
index 0000000..e3ca8cf
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+gst-launch -v  gstrtpbin name=rtpbin \
+ v4l2src ! video/x-raw-yuv,width=640,height=480 \
+ ! queue ! x264enc byte-stream=true bitrate=500 bframes=4 ref=4 me=hex subme=4 weightb=true threads=4 ! rtph264pay \
+ ! rtpbin.send_rtp_sink_0 \
+ rtpbin.send_rtp_src_0 ! udpsink port=5000 host=127.0.0.1 \
+ rtpbin.send_rtcp_src_0 ! udpsink port=5001 host=127.0.0.1 sync=false async=false  \
+ udpsrc port=5002 ! rtpbin.recv_rtcp_sink_0 
\ No newline at end of file
diff --git a/tests/rtpx264_2.sh b/tests/rtpx264_2.sh
new file mode 100755 (executable)
index 0000000..86ae994
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+gst-launch -v  gstrtpbin name=rtpbin \
+ v4l2src \
+ ! queue ! videoscale method=1 ! video/x-raw-yuv,width=640,height=360 \
+ ! queue ! x264enc byte-stream=true bitrate=1000 bframes=4 ref=4 me=hex subme=4 weightb=true threads=4 ! rtph264pay \
+ ! rtpbin.send_rtp_sink_0 \
+ rtpbin.send_rtp_src_0 ! udpsink port=5000 host=127.0.0.1 \
+ rtpbin.send_rtcp_src_0 ! udpsink port=5001 host=127.0.0.1 sync=false async=false  \
+ udpsrc port=5002 ! rtpbin.recv_rtcp_sink_0 > /dev/null &
\ No newline at end of file
diff --git a/tests/rtpx264_pl.sh b/tests/rtpx264_pl.sh
new file mode 100755 (executable)
index 0000000..c4445cd
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+gst-launch -v gstrtpbin name=rtpbin latency=200 \
+ udpsrc caps="application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" port=5000 \
+ ! rtpbin.recv_rtp_sink_0 \
+ rtpbin. ! rtph264depay ! queue ! tee name=t ! ffdec_h264 ! xvimagesink \
+ t. ! queue ! filesink location=/tmp/video.mp4 \
+ udpsrc port=5001 ! rtpbin.recv_rtcp_sink_0 \
+ rtpbin.send_rtcp_src_0 ! udpsink port=5002 host=127.0.0.1 sync=false async=false \
diff --git a/tests/rtpx264_pl_fs.sh b/tests/rtpx264_pl_fs.sh
new file mode 100755 (executable)
index 0000000..308b176
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+gst-launch -v gstrtpbin name=rtpbin latency=200 \
+ udpsrc caps="application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264, payload=(int)96" port=5000 \
+ ! rtpbin.recv_rtp_sink_0 \
+ rtpbin. ! rtph264depay ! tee name=t ! ffdec_h264 ! xvimagesink \
+ udpsrc port=5001 ! rtpbin.recv_rtcp_sink_0 \
+ rtpbin.send_rtcp_src_0 ! udpsink port=5002 host=127.0.0.1 sync=false async=false \
+ t. ! filesink location=/tmp/video.mp4 
diff --git a/tests/simple-effect-gtk.py b/tests/simple-effect-gtk.py
new file mode 100644 (file)
index 0000000..19824f7
--- /dev/null
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+"""Extends basic demo with a gnl composition"""
+
+from demo import Demo, DemoException
+import gtk
+import gst
+import sys
+import os
+
+def create_decodebin():
+    try:
+        return gst.element_factory_make("decodebin2")
+    except:
+        return gst.element_factory_make("decodebin")
+
+class SimpleEffectDemo(Demo):
+    __name__ = "Basic GStreamer Effect Demo"
+    __usage__ = '''python %s file
+    display file with a color_balance effect''' % sys.argv[0]
+    __def_win_size__ = (320, 500)
+    # <excerpt 1>
+    def magic(self, pipeline, sink, args):
+
+        def onPad(obj, pad, target):
+            sinkpad = target.get_compatible_pad(pad, pad.get_caps())
+            pad.link(sinkpad)
+            return True
+
+        assert os.path.exists(sys.argv[-1])
+
+        # create the following pipeline
+        # filesrc location = sys.argv[1] ! decodebin ! videobalance ! ...
+        src = gst.element_factory_make("filesrc")
+        src.set_property("location", sys.argv[-1])
+        decode = create_decodebin()
+
+        self.balance = gst.element_factory_make("videobalance")
+
+        pipeline.add(src, decode, self.balance)
+        src.link(decode)
+        decode.connect("pad-added", onPad, self.balance)
+        self.balance.link(sink)
+
+        return
+    # </excerpt>
+
+    # <excerpt 2>
+    # overriding from parent
+    def customWidgets(self):
+        """Create a control for each property in the videobalance
+        widget"""
+
+        # to be called when a property value needs to change
+        def onValueChanged(widget, prop):
+            # set the corresponding property of the videobalance element
+            self.balance.set_property(prop, widget.get_value())
+
+        # videobalance has several properties, with the following range
+        # and defaults
+        properties = [("contrast", 0, 2, 1),
+                      ("brightness", -1, 1, 0),
+                      ("hue", -1, 1, 0),
+                      ("saturation", 0, 2, 1)]
+
+        # create a place to hold our controls
+        controls = gtk.VBox()
+        labels = gtk.VBox()
+        # for every property, create a control and set its attributes
+        for prop, lower, upper, default in properties:
+            widget = gtk.HScale(); label = gtk.Label(prop)
+
+            # set appropriate attributes
+            widget.set_update_policy(gtk.UPDATE_CONTINUOUS)
+            widget.set_value(default)
+            widget.set_draw_value(True)
+            widget.set_range(lower, upper)
+
+            # connect to our signal handler, specifying the property
+            # to adjust
+            widget.connect("value-changed", onValueChanged, prop)
+
+            # pack widget into box
+            controls.pack_start(widget, True, True)
+            labels.pack_start(label, True, False)
+
+        layout = gtk.HBox()
+        layout.pack_start(labels, False, False)
+        layout.pack_end(controls, True, True)
+        return layout
+
+    # </excerpt>
+
+if __name__ == '__main__':
+    SimpleEffectDemo().run()
\ No newline at end of file
diff --git a/tests/tc_flu_simple_webm.sh b/tests/tc_flu_simple_webm.sh
new file mode 100755 (executable)
index 0000000..e86ca98
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=640
+HEIGHT=360
+#WIDTH=1024
+#HEIGHT=576
+
+
+pipe="v4l2src device=/dev/video0  \
+       ! vp8-encoder ! muxout. \
+       jackaudiosrc ! vorbis-encoder ! muxout.  \
+       webmmux streamable=true name=muxout"
+
+flumotion-launch pipeline-producer pipeline="$pipe" ! http-streamer port=8800
+
+sleep 2
+
+jack_disconnect system:capture_1 flumotion-launch:in_jackaudiosrc0_1
+jack_connect   jack_rack:out_1  flumotion-launch:in_jackaudiosrc0_1
+
diff --git a/tests/tc_video_alsa_webm_stream.sh b/tests/tc_video_alsa_webm_stream.sh
new file mode 100755 (executable)
index 0000000..7754217
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=320
+HEIGHT=240
+#WIDTH=1024
+#HEIGHT=576
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=2 quality=9.0 ! queue ! muxout. \
+       alsasrc device=hw:0 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none \
+       > /dev/null 
+
diff --git a/tests/tc_video_dv_webm_stream.sh b/tests/tc_video_dv_webm_stream.sh
new file mode 100755 (executable)
index 0000000..054abc6
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+#WIDTH=640
+#HEIGHT=360
+#WIDTH=1024
+#HEIGHT=576
+WIDTH=480
+HEIGHT=320
+
+gst-launch dv1394src ! dvdemux ! queue ! dvdec ! queue ! deinterlace \
+       ! queue ! videoscale ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT \
+       ! queue ! ffmpegcolorspace \
+        ! queue ! vp8enc speed=2 threads=2 quality=10.0 max-latency=25 max-keyframe-distance=96 \
+        ! queue ! muxout. \
+       jackaudiosrc connect=1 ! audio/x-raw-float, channels=2 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=0.6 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none blocksize=65536 sync-method=1 
+
+
diff --git a/tests/tc_video_only_simple_webm.sh b/tests/tc_video_only_simple_webm.sh
new file mode 100755 (executable)
index 0000000..7c92b88
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=640
+HEIGHT=480
+#WIDTH=1024
+#HEIGHT=576
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=4 quality=5.0 ! queue ! muxout. \
+       webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none 
+
diff --git a/tests/tc_video_simple_file.sh b/tests/tc_video_simple_file.sh
new file mode 100755 (executable)
index 0000000..181dd2f
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=432
+HEIGHT=240
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! theoraenc bitrate=400 speed-level=0 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc ! queue ! muxout.  \
+       oggmux name=muxout ! filesink location=/home/telecaster/archives/test.ogg \
+       > /dev/null &
+
+sleep 2
+
+jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
+jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
+
diff --git a/tests/tc_video_simple_file_webm.sh b/tests/tc_video_simple_file_webm.sh
new file mode 100755 (executable)
index 0000000..d08dc06
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=640
+HEIGHT=360
+#WIDTH=1024
+#HEIGHT=576
+
+gst-launch v4l2src device=/dev/video1 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! videoflip method=rotate-180 \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=2 quality=5 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout ! filesink location=/home/telecaster/trash/test.webm \
+       > /dev/null &
+
+sleep 2
+
+jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
+jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
+
diff --git a/tests/tc_video_simple_start.sh b/tests/tc_video_simple_start.sh
new file mode 100755 (executable)
index 0000000..061e290
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=432
+HEIGHT=240
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! theoraenc quality=10 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
+       oggmux name=muxout ! shout2send mount=/telecaster_live_video.ogg port=8000 password=source2parisson ip=127.0.0.1 \
+       > /dev/null &
+
+sleep 2
+
+jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
+jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
+
diff --git a/tests/tc_video_simple_webm_ice.sh b/tests/tc_video_simple_webm_ice.sh
new file mode 100755 (executable)
index 0000000..58c1573
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=640
+HEIGHT=360
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-rgb, width=$WIDTH, height=$HEIGHT  \
+    ! queue ! ffmpegcolorspace \
+    ! queue ! vp8enc speed=2 threads=2 quality=9.0 \
+    ! webmmux streamable=true \
+    ! shout2send mount=/telecaster_live_video.webm port=8000 password=source2parisson ip=127.0.0.1
\ No newline at end of file
diff --git a/tests/tc_video_simple_webm_stream.sh b/tests/tc_video_simple_webm_stream.sh
new file mode 100755 (executable)
index 0000000..d41d76f
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=640
+HEIGHT=480
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT, framerate={30/1}  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=4 quality=7.0 max-latency=2 max-keyframe-distance=3 auto-alt-ref-frames=true  ! queue ! muxout. \
+       jackaudiosrc connect=2 client-name=webmenc ! audio/x-raw-float, channels=2 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none blocksize=65536 sync-method=1
+
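+# To view the stream, a client pipeline along the lines of tests/tcp2x.sh
+# should work (assuming the same host):
+#   gst-launch tcpclientsrc host=127.0.0.1 port=9000 ! matroskademux ! vp8dec ! ffmpegcolorspace ! ximagesink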
diff --git a/tests/tc_video_simple_webm_stream_hd.sh b/tests/tc_video_simple_webm_stream_hd.sh
new file mode 100755 (executable)
index 0000000..7afd26c
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=1024
+HEIGHT=576
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! videoflip method=rotate-180 \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=2 quality=5 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! tee name=t ! queue ! tcpserversink host=127.0.0.1 port=9000 \
+       t. ! queue ! filesink location=/home/telecaster/trash/test.webm \
+       > /dev/null &
+
diff --git a/tests/tc_video_simple_webm_stream_hd_alsa_test.sh b/tests/tc_video_simple_webm_stream_hd_alsa_test.sh
new file mode 100755 (executable)
index 0000000..3894ae2
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=1280
+HEIGHT=720
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=1 quality=9.0 ! queue ! muxout. \
+       alsasrc \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
+
+       
+
diff --git a/tests/tc_video_simple_webm_stream_hd_jack_test.sh b/tests/tc_video_simple_webm_stream_hd_jack_test.sh
new file mode 100755 (executable)
index 0000000..0cfb252
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=1280
+HEIGHT=720
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=4 quality=9.0 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
+
+       
+
diff --git a/tests/tc_video_simple_webm_stream_hd_test.sh b/tests/tc_video_simple_webm_stream_hd_test.sh
new file mode 100755 (executable)
index 0000000..8522473
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=1280
+HEIGHT=720
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=4 quality=9.0 \
+       ! queue ! webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
+
diff --git a/tests/tc_video_simple_webm_stream_m.sh b/tests/tc_video_simple_webm_stream_m.sh
new file mode 100755 (executable)
index 0000000..b4d8403
--- /dev/null
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=640
+HEIGHT=360
+#WIDTH=1024
+#HEIGHT=576
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! videoflip method=rotate-180 \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=2 quality=5 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! tee name=t ! queue ! multifdsink name=sink sync=false recover-policy=3 \
+       t. ! queue ! filesink location=/home/telecaster/trash/test.webm \
+       > /dev/null &
+
+sleep 2
+
+jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
+jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
+
diff --git a/tests/tc_video_simple_webm_stream_sd_test.sh b/tests/tc_video_simple_webm_stream_sd_test.sh
new file mode 100755 (executable)
index 0000000..0fb2ce0
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=800
+HEIGHT=600
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=4 quality=9.0 \
+       ! queue ! webmmux streamable=true name=muxout \
+       ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none
+
diff --git a/tests/tc_video_simple_webm_tee.sh b/tests/tc_video_simple_webm_tee.sh
new file mode 100755 (executable)
index 0000000..d006917
--- /dev/null
@@ -0,0 +1,25 @@
+#!/bin/sh
+
+# Start TeleCaster video channel
+
+WIDTH=640
+HEIGHT=360
+#WIDTH=1024
+#HEIGHT=576
+
+gst-launch v4l2src device=/dev/video0 ! video/x-raw-yuv, width=$WIDTH, height=$HEIGHT  \
+       ! queue ! videoflip method=rotate-180 \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=2 quality=9.0 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! tee name=t ! queue ! tcpserversink host=127.0.0.1 port=9000 protocol=none \
+       t. ! queue ! filesink location=/home/telecaster/trash/test.webm \
+       > /dev/null &
+
+sleep 4
+
+jack_disconnect system:capture_1 gst-launch-0.10:in_jackaudiosrc0_1
+jack_connect   jack_rack:out_1  gst-launch-0.10:in_jackaudiosrc0_1
+
diff --git a/tests/tcp2x.sh b/tests/tcp2x.sh
new file mode 100755 (executable)
index 0000000..8e61926
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+gst-launch tcpclientsrc host=192.168.0.18 port=9000 \
+    ! matroskademux \
+    ! vp8dec ! ffmpegcolorspace \
+    ! ximagesink
+
+# tcpclientsrc host=192.168.0.18 port=9000 protocol=none \
\ No newline at end of file
diff --git a/tests/video_player_qt.py b/tests/video_player_qt.py
new file mode 100644 (file)
index 0000000..91e946a
--- /dev/null
@@ -0,0 +1,73 @@
+import sys, os
+from PyQt4 import QtCore, QtGui, uic
+from PyQt4.phonon import Phonon
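+
+# Simple Phonon-based player with a seek slider and a time display.
+# Usage: python video_player_qt.py <file-or-URL>
+# The ':/icons/...' paths assume a compiled Qt resource file providing the icons.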
+
+class VideoPlayer(QtGui.QWidget):
+    def __init__(self, url, parent = None):
+
+        self.url = url
+
+        QtGui.QWidget.__init__(self, parent)
+        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
+            QtGui.QSizePolicy.Preferred)
+
+
+        self.player = Phonon.VideoPlayer(Phonon.VideoCategory,self)
+        self.player.load(Phonon.MediaSource(self.url))
+        self.player.mediaObject().setTickInterval(100)
+        self.player.mediaObject().tick.connect(self.tock)
+
+        self.play_pause = QtGui.QPushButton(self)
+        self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+        self.play_pause.clicked.connect(self.playClicked)
+        self.player.mediaObject().stateChanged.connect(self.stateChanged)
+
+        self.slider = Phonon.SeekSlider(self.player.mediaObject() , self)
+
+        self.status = QtGui.QLabel(self)
+        self.status.setAlignment(QtCore.Qt.AlignRight |
+            QtCore.Qt.AlignVCenter)
+
+        self.download = QtGui.QPushButton("Download", self)
+        self.download.clicked.connect(self.fetch)
+        topLayout = QtGui.QVBoxLayout(self)
+        topLayout.addWidget(self.player)
+        layout = QtGui.QHBoxLayout()
+        layout.addWidget(self.play_pause)
+        layout.addWidget(self.slider)
+        layout.addWidget(self.status)
+        layout.addWidget(self.download)
+        topLayout.addLayout(layout)
+        self.setLayout(topLayout)
+
+    def playClicked(self):
+        if self.player.mediaObject().state() == Phonon.PlayingState:
+            self.player.pause()
+        else:
+            self.player.play()
+
+    def stateChanged(self, new, old):
+        if new == Phonon.PlayingState:
+            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_pause.svg'))
+        else:
+            self.play_pause.setIcon(QtGui.QIcon(':/icons/player_play.svg'))
+
+    def tock(self, time):
+        time = time/1000
+        h = time/3600
+        m = (time-3600*h) / 60
+        s = (time-3600*h-m*60)
+        self.status.setText('%02d:%02d:%02d'%(h,m,s))
+
+    def fetch(self):
+        print 'Should download %s'%self.url
+
+def main():
+    app = QtGui.QApplication(sys.argv)
+    window=VideoPlayer(sys.argv[1])
+    window.show()
+    # It's exec_ because exec is a reserved word in Python
+    sys.exit(app.exec_())
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/vumeter.py b/tests/vumeter.py
new file mode 100644 (file)
index 0000000..c2838ec
--- /dev/null
@@ -0,0 +1,113 @@
+from PyQt4 import QtCore, QtGui
+import pygst
+import sys, os, time, math
+pygst.require("0.10")
+import gst
+import gobject
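+
+# Plays an mp3 file through a gst-0.10 pipeline inside a QThread and feeds
+# the level element's rms readings to a vertical QProgressBar used as a
+# VU meter. Adjust the hard-coded filepath in Player.run() before running.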
+
+#This class runs the code it contains in another thread using QThread
+class Player(QtCore.QThread):
+     def __init__(self):
+          QtCore.QThread.__init__(self)
+
+     def run(self):
+          #create the pipeline
+          player = gst.Pipeline("player")
+          #filesrc element
+          source = gst.element_factory_make("filesrc", "file-source")
+          #volume element to adjust volume of audio
+          volume = gst.element_factory_make("volume", "volume")
+          #level element to get the rms/peak property
+          level = gst.element_factory_make("level", "volume-level")
+          #decoder to play mp3 files
+          decoder = gst.element_factory_make("mad", "mp3-decoder")
+          #convert the audio to play to speakers
+          conv = gst.element_factory_make("audioconvert", "converter")
+          #autosink if not alsa
+          sink = gst.element_factory_make("autoaudiosink", "audio-output")
+
+          #add the elements to the pipeline
+          player.add(source, volume, level, decoder, conv, sink)
+
+          #link the elements in order
+          gst.element_link_many(source, decoder, conv, volume, level, sink)
+          #set properties of elements
+          player.get_by_name("volume").set_property('volume', 1)
+          player.get_by_name("volume-level").set_property('peak-ttl' , 0)
+          player.get_by_name("volume-level").set_property('peak-falloff', 20)
+          #get the pipeline's bus to listen for messages
+          bus = player.get_bus()
+          bus.add_signal_watch()
+
+          #the source of the player
+          filepath = "/home/momo/music_local/test/aboul.wav.mp3"
+          #set the property of the element filesrc
+          player.get_by_name("file-source").set_property('location', filepath)
+          #play the file
+          player.set_state(gst.STATE_PLAYING)
+          #get the current thread in Qt
+          play_thread_id = self.currentThread()
+
+          #set the minimum decibels
+          MIN_DB = -45
+          #set the maximum decibels
+          MAX_DB = 0
+          #if current thread is running
+          while play_thread_id == self.currentThread():
+               #listen to messages that emit during playing
+               messagePoll = bus.poll(gst.MESSAGE_ANY,-1)
+               #if the message is level
+               if messagePoll.src == level:
+                    #get the structure of the message
+                    struc = messagePoll.structure
+                    #if the structure carries an rms field
+                    if struc.has_key('rms'):
+                         rms = struc["rms"]
+                         #get the values of rms in a list
+                         rms0 = abs(float(rms[0]))
+                         #compute for rms to decibels
+                         rmsdb = 10 * math.log(rms0 / 32768 )
+                         #compute for progress bar
+                         vlrms = (rmsdb-MIN_DB) * 100 / (MAX_DB-MIN_DB)
+                         #emit the signal to the qt progress bar
+                         self.emit(QtCore.SIGNAL("setLabel"), abs(vlrms))
+               #set timer
+               time.sleep(0.05)
+
+#this class was generated with pyuic from Qt Designer
+class Ui_Dialog(object):
+     def setupUi(self, Dialog):
+          Dialog.setObjectName("Dialog")
+          Dialog.resize(QtCore.QSize(QtCore.QRect(0,0,94,300).size()).expandedTo(Dialog.minimumSizeHint()))
+
+          self.progressBar = QtGui.QProgressBar(Dialog)
+          self.progressBar.setGeometry(QtCore.QRect(10,10,31,281))
+          self.progressBar.setProperty("value",QtCore.QVariant(24))
+          self.progressBar.setOrientation(QtCore.Qt.Vertical)
+          self.progressBar.setObjectName("progressBar")
+          self.progressBar.setValue(0)
+          self.progressBar.setMinimum(0)
+          self.progressBar.setMaximum(100)
+
+          self.retranslateUi(Dialog)
+          QtCore.QMetaObject.connectSlotsByName(Dialog)
+     #sets the value of the progress bar from the emitted signal
+     def setLabel(self,value):
+          self.progressBar.setValue(value)
+
+     def retranslateUi(self, Dialog):
+          Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
+
+if __name__ == "__main__":
+     app = QtGui.QApplication(sys.argv)
+     window = QtGui.QDialog()
+     ui = Ui_Dialog()
+     ui.setupUi(window)
+     window.show()
+     #creates instance of the Player class
+     player=Player()
+     #connect to signal emitted in Player class
+     QtCore.QObject.connect(player, QtCore.SIGNAL("setLabel"), ui.setLabel, QtCore.Qt.QueuedConnection)
+     #run the Player class thread
+     player.start()
+     app.exec_()
diff --git a/tests/x264_2.sh b/tests/x264_2.sh
new file mode 100755 (executable)
index 0000000..ae35e7c
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+gst-launch v4l2src ! video/x-raw-yuv,width=640,height=480 \
+ ! queue ! x264enc byte-stream=true bitrate=500 bframes=4 ref=4 me=hex subme=4 weightb=true threads=4 \
+ ! tcpserversink host=127.0.0.1 port=9000 protocol=none
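+
+# The raw H.264 byte-stream served here can be viewed with the client
+# pipeline in tests/x264_pl2.sh (same host/port).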
diff --git a/tests/x264_pl2.sh b/tests/x264_pl2.sh
new file mode 100755 (executable)
index 0000000..8595bed
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+gst-launch tcpclientsrc host=127.0.0.1 port=9000 \
+  ! ffdec_h264 ! xvimagesink 
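+
+# If the byte-stream does not decode directly, inserting h264parse between
+# tcpclientsrc and ffdec_h264 may help (untested here).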
diff --git a/tests/x264_relay.sh b/tests/x264_relay.sh
new file mode 100755 (executable)
index 0000000..8d1a65b
--- /dev/null
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+gst-launch tcpclientsrc host=192.168.0.18 port=9000 \
+    ! matroskademux \
+    ! queue ! vp8dec \
+    ! queue ! ffmpegcolorspace \
+    ! queue ! x264enc bitrate=200 bframes=4 ref=4 me=hex subme=4 weightb=true threads=0 ! muxout. \
+       mp4mux name=muxout \
+       ! queue ! filesink location=/tmp/video.mp4
+
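+# Note: mp4mux only finalises the MP4 headers on EOS; consider running
+# gst-launch with -e (--eos-on-shutdown) so that Ctrl+C ends the stream
+# cleanly, otherwise /tmp/video.mp4 may be left unplayable.
+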
+# tcpclientsrc host=192.168.0.18 port=9000 protocol=none \
diff --git a/tests/x264_relay_x.sh b/tests/x264_relay_x.sh
new file mode 100755 (executable)
index 0000000..3a30d1b
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+gst-launch tcpclientsrc host=192.168.0.18 port=9000 \
+    ! matroskademux \
+    ! vp8dec ! ffmpegcolorspace \
+    ! queue ! x264enc \
+    ! queue ! vdpauh264dec ! ffmpegcolorspace ! ximagesink
+
+# tcpclientsrc host=192.168.0.18 port=9000 protocol=none \
\ No newline at end of file
diff --git a/tests/x_jack_webm.sh b/tests/x_jack_webm.sh
new file mode 100755 (executable)
index 0000000..8abeab6
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# TeleCaster test: record the desktop (ximagesrc) and JACK audio to a WebM file
+
+gst-launch ximagesrc ! video/x-raw-rgb,framerate=30/1 \
+       ! queue ! ffmpegcolorspace \
+       ! queue ! vp8enc speed=2 threads=2 quality=9.0 ! queue ! muxout. \
+       jackaudiosrc connect=1 \
+       ! queue ! audioconvert ! queue ! vorbisenc quality=0.3 ! queue ! muxout.  \
+       webmmux streamable=true name=muxout \
+       ! queue ! filesink location=/home/momo/tmp/desktop.webm
+
+