]> git.parisson.com Git - telemeta.git/commitdiff
mv scripts back
authorGuillaume Pellerin <guillaume.pellerin@ircam.fr>
Thu, 4 Feb 2016 15:17:55 +0000 (16:17 +0100)
committerGuillaume Pellerin <guillaume.pellerin@ircam.fr>
Thu, 4 Feb 2016 15:17:55 +0000 (16:17 +0100)
42 files changed:
data/backup/backup_db.sh [deleted file]
data/backup/restore_db.sh [deleted file]
scripts/kdenlive/__init__.py [new file with mode: 0644]
scripts/kdenlive/auto_fade.py [new file with mode: 0755]
scripts/kdenlive/auto_fade_batch.py [new file with mode: 0755]
scripts/kdenlive/fade.py [new file with mode: 0644]
scripts/kdenlive/mlt_fix_threads.sh [new file with mode: 0755]
scripts/kdenlive/mlt_process_batch.py [new file with mode: 0755]
scripts/kdenlive/session.py [new file with mode: 0644]
scripts/old/crem_checker.py [new file with mode: 0755]
scripts/old/process-waveform-cgi.py [new file with mode: 0755]
scripts/old/telemeta-backup.py [new file with mode: 0755]
scripts/old/telemeta-crem-import-alt_ids.py [new file with mode: 0755]
scripts/old/telemeta-crem-import-test.py [new file with mode: 0755]
scripts/old/telemeta-crem-import.py [new file with mode: 0755]
scripts/old/telemeta-media-link.py [new file with mode: 0755]
scripts/sql/backup_db.sh [new file with mode: 0755]
scripts/sql/convert_myisam_to_innodb.sql [new file with mode: 0644]
scripts/sql/import_sql.sh [new file with mode: 0755]
scripts/sql/restore_db.sh [new file with mode: 0755]
scripts/transcode/create_thumbs.py [new file with mode: 0755]
scripts/transcode/remux_fix_media.py [new file with mode: 0755]
scripts/transcode/transcode.py [new file with mode: 0755]
telemeta/util/import_sql.sh [deleted file]
telemeta/util/kdenlive/__init__.py [deleted file]
telemeta/util/kdenlive/auto_fade.py [deleted file]
telemeta/util/kdenlive/auto_fade_batch.py [deleted file]
telemeta/util/kdenlive/fade.py [deleted file]
telemeta/util/kdenlive/mlt_fix_threads.sh [deleted file]
telemeta/util/kdenlive/mlt_process_batch.py [deleted file]
telemeta/util/kdenlive/session.py [deleted file]
telemeta/util/old/crem_checker.py [deleted file]
telemeta/util/old/process-waveform-cgi.py [deleted file]
telemeta/util/old/telemeta-backup.py [deleted file]
telemeta/util/old/telemeta-crem-import-alt_ids.py [deleted file]
telemeta/util/old/telemeta-crem-import-test.py [deleted file]
telemeta/util/old/telemeta-crem-import.py [deleted file]
telemeta/util/old/telemeta-media-link.py [deleted file]
telemeta/util/sql/convert_myisam_to_innodb.sql [deleted file]
telemeta/util/transcode/create_thumbs.py [deleted file]
telemeta/util/transcode/remux_fix_media.py [deleted file]
telemeta/util/transcode/transcode.py [deleted file]

diff --git a/data/backup/backup_db.sh b/data/backup/backup_db.sh
deleted file mode 100755 (executable)
index b28c0f4..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-NOW=$(date +"%T-%m-%d-%Y")
-mysqldump -hdb -uroot -pmysecretpassword telemeta | gzip > /srv/backup/telemeta-$NOW.sql.gz
diff --git a/data/backup/restore_db.sh b/data/backup/restore_db.sh
deleted file mode 100755 (executable)
index 8666bf8..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-file=$1
-
-if [[ $file == *".gz" ]]; then
-    echo 'ok'
-    gunzip < /srv/backup/$file | mysql -hdb -uroot -pmysecretpassword telemeta
-else
-    mysql -hdb -uroot -pmysecretpassword telemeta < /srv/backup/$file
-fi
diff --git a/scripts/kdenlive/__init__.py b/scripts/kdenlive/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/scripts/kdenlive/auto_fade.py b/scripts/kdenlive/auto_fade.py
new file mode 100755 (executable)
index 0000000..c716a8c
--- /dev/null
@@ -0,0 +1,11 @@
#!/usr/bin/python
"""Apply automatic fade in/out transitions to a KdenLive session file.

Usage: auto_fade.py <session.kdenlive>
The session file given as last argument is rewritten in place.
"""

import sys

# NOTE(review): this script was moved out of telemeta/util (the old
# telemeta.util.kdenlive package is deleted in this commit) -- confirm
# this import path still resolves.
from telemeta.util.kdenlive.fade import AutoFade

path = sys.argv[-1]
fade = AutoFade(path)
data = fade.run()
# 'with' guarantees the handle is closed even if the write fails.
with open(path, 'w') as f:
    f.write(data)
diff --git a/scripts/kdenlive/auto_fade_batch.py b/scripts/kdenlive/auto_fade_batch.py
new file mode 100755 (executable)
index 0000000..2704776
--- /dev/null
@@ -0,0 +1,20 @@
+
"""Batch-apply automatic fades to every matching session file of a directory.

Usage: auto_fade_batch.py <directory> <extension>
Each processed file gets a '<file>.faded' marker so it is faded only once;
a '<file>.bak' backup is kept.
"""

import os
import shutil
import sys

# NOTE(review): the telemeta.util.kdenlive package is deleted in this
# commit -- confirm this import path still resolves.
from telemeta.util.kdenlive.fade import AutoFade

if __name__ == '__main__':
    src_dir = sys.argv[-2]
    ext = sys.argv[-1]

    for filename in os.listdir(src_dir):
        prefix, extension = os.path.splitext(filename)
        path = src_dir + os.sep + filename
        flag = path + '.faded'  # marker: file already faded
        if ext in extension and not os.path.exists(flag):
            # shutil.copy is safe for paths containing spaces or shell
            # metacharacters, unlike os.system('cp ...').
            shutil.copy(path, path + '.bak')
            fade = AutoFade(path)
            data = fade.run()
            with open(path, 'w') as f:
                f.write(data)
            # Create the marker so the file is not faded twice.
            open(flag, 'a').close()
diff --git a/scripts/kdenlive/fade.py b/scripts/kdenlive/fade.py
new file mode 100644 (file)
index 0000000..c590194
--- /dev/null
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2012-2013 Guillaume Pellerin <yomguy@parisson.com>
+
+# This file is part of TimeSide.
+
+# TimeSide is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+
+# TimeSide is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
+
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+
+
+from telemeta.util.xmltodict2 import *
+
+
class AutoFade(object):
    """ Automatically applies fade in and fade out transitions between each segment of a KdenLive session.
        Each video clip needs to be split into one video track and an audio one ("Split audio"),
        so that an audio fade in/out is also applied.

        MLT files are also supported.
    """

    # NOTE: this class uses Python 2 idioms (``unicode`` literals/calls).

    def __init__(self, path, audio_frames_out=2, audio_frames_in=1,
                       video_frames_out=3, video_frames_in=3):
        """Parse the session at *path*; fade lengths are given in frames."""
        self.audio_frames_in = audio_frames_in
        self.audio_frames_out = audio_frames_out
        self.video_frames_in = video_frames_in
        self.video_frames_out = video_frames_out
        self.path = path
        # Parsed XML tree in telemeta.util.xmltodict2 node format:
        # {'name': ..., 'attributes': {...}, 'children': [...], 'cdata': ...}
        self.session = xmltodict(self.path)

    def audio_fade_out(self, frame_out):
        """Build a 'volume' filter node fading audio out, ending at *frame_out*."""
        child = {'attributes': {u'id': u'fadeout',
        u'in': unicode(int(frame_out)-self.audio_frames_out),
        u'out': unicode(frame_out)},
       'children': [{'attributes': {u'name': u'track'},
         'cdata': '0',
         'name': 'property'},
        {'attributes': {u'name': u'window'},
         'cdata': '75',
         'name': 'property'},
        {'attributes': {u'name': u'max_gain'},
         'cdata': '20dB',
         'name': 'property'},
        {'attributes': {u'name': u'mlt_type'},
         'cdata': 'filter',
         'name': 'property'},
        {'attributes': {u'name': u'mlt_service'},
         'cdata': 'volume',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_id'},
         'cdata': 'fadeout',
         'name': 'property'},
        {'attributes': {u'name': u'tag'},
         'cdata': 'volume',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_ix'},
         'cdata': '1',
         'name': 'property'},
        {'attributes': {u'name': u'gain'}, 'cdata': '1', 'name': 'property'},
        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
       'name': 'filter'}

        return child

    def audio_fade_in(self, frame_in):
        """Build a 'volume' filter node fading audio in, starting at *frame_in*."""
        child = {'attributes': {u'id': u'fadein',
        u'in': unicode(frame_in),
        u'out': unicode(int(frame_in)+self.audio_frames_in)},
       'children': [{'attributes': {u'name': u'track'},
         'cdata': '0',
         'name': 'property'},
        {'attributes': {u'name': u'window'},
         'cdata': '75',
         'name': 'property'},
        {'attributes': {u'name': u'max_gain'},
         'cdata': '20dB',
         'name': 'property'},
        {'attributes': {u'name': u'mlt_type'},
         'cdata': 'filter',
         'name': 'property'},
        {'attributes': {u'name': u'mlt_service'},
         'cdata': 'volume',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_id'},
         'cdata': 'fadein',
         'name': 'property'},
        {'attributes': {u'name': u'tag'},
         'cdata': 'volume',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_ix'},
         'cdata': '1',
         'name': 'property'},
        {'attributes': {u'name': u'gain'}, 'cdata': '0', 'name': 'property'},
        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
       'name': 'filter'}

        return child


    def video_fade_out(self, frame_out):
        """Build a 'brightness' filter node fading to black, ending at *frame_out*."""
        child = {'attributes': {u'id': u'fade_to_black',
        u'in': unicode(int(frame_out)-self.video_frames_out),
        u'out': unicode(frame_out)},
       'children': [{'attributes': {u'name': u'track'},
         'cdata': '0',
         'name': 'property'},
        {'attributes': {u'name': u'start'}, 'cdata': '1', 'name': 'property'},
        {'attributes': {u'name': u'mlt_type'},
         'cdata': 'filter',
         'name': 'property'},
        {'attributes': {u'name': u'mlt_service'},
         'cdata': 'brightness',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_id'},
         'cdata': 'fade_to_black',
         'name': 'property'},
        {'attributes': {u'name': u'tag'},
         'cdata': 'brightness',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_ix'},
         'cdata': '1',
         'name': 'property'},
        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
       'name': 'filter'}

        return child


    def video_fade_in(self, frame_in):
        """Build a 'brightness' filter node fading from black, starting at *frame_in*."""
        child = {'attributes': {u'id': u'fade_from_black',
        u'in': unicode(frame_in),
        u'out': unicode(int(frame_in)+self.video_frames_in)},
       'children': [{'attributes': {u'name': u'track'},
         'cdata': '0',
         'name': 'property'},
        {'attributes': {u'name': u'start'}, 'cdata': '0', 'name': 'property'},
        {'attributes': {u'name': u'mlt_type'},
         'cdata': 'filter',
         'name': 'property'},
        {'attributes': {u'name': u'mlt_service'},
         'cdata': 'brightness',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_id'},
         'cdata': 'fade_from_black',
         'name': 'property'},
        {'attributes': {u'name': u'tag'},
         'cdata': 'brightness',
         'name': 'property'},
        {'attributes': {u'name': u'kdenlive_ix'},
         'cdata': '1',
         'name': 'property'},
        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
       'name': 'filter'}

        return child

    def run(self):
        """Insert fade filters into every playlist entry and return the
        session serialized back to XML as a UTF-8 encoded string.

        For each producer kind (audio/video), occurrences alternate:
        even-numbered entries get a fade-out at their end, odd-numbered
        ones a fade-in at their start.
        """
        audio_count = 0
        video_count = 0

        for attr in self.session['children']:
            if 'playlist' in attr['name'] and 'children' in attr:
                for att in attr['children']:
                    # Only entries that do not already carry filter children.
                    if 'producer' in att['attributes'] and not 'children' in att:
                        producer = att['attributes']['producer']
                        if producer != 'black':  # skip the built-in black producer

                            frame_in = att['attributes']['in']
                            frame_out = att['attributes']['out']

                            if 'audio' in producer:
                                if not audio_count % 2:
                                    att['children'] = [self.audio_fade_out(frame_out)]
                                else:
                                    att['children'] = [self.audio_fade_in(frame_in)]
                                audio_count += 1


                            if 'video' in producer:
                                if not video_count % 2:
                                    att['children'] = [self.video_fade_out(frame_out)]
                                else:
                                    att['children'] = [self.video_fade_in(frame_in)]
                                video_count += 1

        return dicttoxml(self.session).encode('utf-8')
+
+
diff --git a/scripts/kdenlive/mlt_fix_threads.sh b/scripts/kdenlive/mlt_fix_threads.sh
new file mode 100755 (executable)
index 0000000..60b0061
--- /dev/null
@@ -0,0 +1,10 @@
#!/bin/sh
# Force every mlt/kdenlive render script in a directory to use 4 encoding
# threads.  Usage: mlt_fix_threads.sh <directory>

dir=$1

# Iterate the glob directly instead of parsing `ls` output, so file names
# containing spaces survive; skip the literal pattern when nothing matches.
for file in "$dir"/*.sh; do
  [ -e "$file" ] || continue
  # Single pass with a word boundary: the old chained replacements turned
  # e.g. "threads=12" into "threads=42".
  perl -pi -e 's/threads=(1|2|6)\b/threads=4/g' "$file"
done
diff --git a/scripts/kdenlive/mlt_process_batch.py b/scripts/kdenlive/mlt_process_batch.py
new file mode 100755 (executable)
index 0000000..7d346c6
--- /dev/null
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+
+import os, sys
+
+if __name__ == '__main__':
+    root_dir = sys.argv[-1]
+
+    fading = False
+    if '--fading' in sys.argv:
+        fading = True
+
+    for root, dirs, files in os.walk(root_dir):
+        for filename in files:
+            prefix, extension = os.path.splitext(filename)
+            path = root + os.sep + filename
+
+            flag = path + '.processed'
+            if 'sh' in extension and not os.path.exists(flag):
+                if fading:
+                    from telemeta.util.kdenlive.fade import AutoFade
+                    local_files = os.listdir(root)
+                    for local_file in local_files:
+                        local_name, local_ext = os.path.splitext(local_file)
+                        if 'mlt' in local_ext:
+                            local_path = root + os.sep + local_file
+                            local_flag = local_path + '.faded'
+                            if not os.path.exists(local_flag):
+                                print 'fading :        ' + local_path 
+                                os.system('cp ' + local_path + ' ' + local_path + '.bak')
+                                fade = AutoFade(local_path)
+                                data = fade.run()
+                                f = open(local_path, 'w')
+                                f.write(data)
+                                f.close()
+                                os.system('touch ' + local_flag)
+
+                print 'processing :    ' + path
+                os.system('nice -n 19 ' + path)
+                os.system('touch ' + flag)
diff --git a/scripts/kdenlive/session.py b/scripts/kdenlive/session.py
new file mode 100644 (file)
index 0000000..74eb425
--- /dev/null
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2012-2013 Parisson SARL
+
+# This software is a computer program whose purpose is to backup, analyse,
+# transcode and stream any audio content with its metadata over a web frontend.
+
+# This software is governed by the CeCILL  license under French law and
+# abiding by the rules of distribution of free software.  You can  use,
+# modify and/ or redistribute the software under the terms of the CeCILL
+# license as circulated by CEA, CNRS and INRIA at the following URL
+# "http://www.cecill.info".
+
+# As a counterpart to the access to the source code and  rights to copy,
+# modify and redistribute granted by the license, users are provided only
+# with a limited warranty  and the software's author,  the holder of the
+# economic rights,  and the successive licensors  have only  limited
+# liability.
+
+# In this respect, the user's attention is drawn to the risks associated
+# with loading,  using,  modifying and/or developing or reproducing the
+# software by the user in light of its specific status of free software,
+# that may mean  that it is complicated to manipulate,  and  that  also
+# therefore means  that it is reserved for developers  and  experienced
+# professionals having in-depth computer knowledge. Users are therefore
+# encouraged to load and test the software's suitability as regards their
+# requirements in conditions enabling the security of their systems and/or
+# data to be ensured and,  more generally, to use and operate it in the
+# same conditions as regards security.
+
+# The fact that you are presently reading this means that you have had
+# knowledge of the CeCILL license and that you accept its terms.
+
+# Authors: Guillaume Pellerin <yomguy@parisson.com>
+
+
+import time
+from telemeta.util.xmltodict2 import *
+
+
class KDEnLiveSession(object):
    """Read-only accessor for a KdenLive (MLT) session file.

    The XML session is parsed by xmltodict into nested nodes of the form
    {'name': ..., 'attributes': {...}, 'children': [...]}.
    """

    def __init__(self, path):
        # Parsed session tree (telemeta.util.xmltodict2 format).
        self.session = xmltodict(path)

    def entries(self):
        """Return the attribute dicts of every playlist entry, skipping
        the built-in 'black' producer."""
        entries = []
        for node in self.session['children']:
            if 'playlist' in node['name'] and 'children' in node:
                for child in node['children']:
                    if 'entry' in child['name'] and child['attributes']['producer'] != 'black':
                        entries.append(child['attributes'])
        return entries

    def video_entries(self):
        """Like entries(), but keep only video producers (producer id not
        containing 'audio')."""
        entries = []
        for node in self.session['children']:
            if 'playlist' in node['name'] and 'children' in node:
                for child in node['children']:
                    attrs = child['attributes'] if 'entry' in child['name'] else None
                    if attrs and attrs['producer'] != 'black' \
                            and 'audio' not in attrs['producer']:
                        entries.append(attrs)
        return entries

    def entries_sorted(self):
        """All entries sorted by ascending 'in' frame."""
        return sorted(self.entries(), key=lambda e: int(e['in']))

    def entries_video_seconds(self):
        """Return the video entries converted to seconds.

        Each element is {'id', 't', 'in', 'out'} where 't' is the
        cumulated start time of the entry on the edited timeline.
        """
        fps = float(self.profile()['frame_rate_num'])
        timeline = []  # avoid shadowing the builtin 'list'
        entries = self.video_entries()
        for i in range(len(entries)):
            entry_id = entries[i]['producer'].split('_')[0]
            t_in = int(entries[i]['in']) / fps
            t_out = int(entries[i]['out']) / fps

            if i == 0:
                t = 0
            else:
                # Cumulated duration of all previous segments.
                t = timeline[i-1]['t'] + int(entries[i-1]['out']) / fps - int(entries[i-1]['in']) / fps

            timeline.append({'id': entry_id, 't': t, 'in': t_in, 'out': t_out})

        return timeline

    def cuts(self, entries):
        """Cumulated gaps (in frames) between consecutive entries; the
        first element is always 0."""
        cuts = [0]
        for i in range(1, len(entries)):
            cuts.append(cuts[i-1] + int(entries[i]['in']) - int(entries[i-1]['out']))
        return cuts

    def first_video_frame(self):
        """'in' frame of the earliest entry (audio or video, despite the
        method name — entries_sorted() covers both)."""
        return int(self.entries_sorted()[0]['in'])

    def profile(self):
        """Attributes of the session 'profile' node, or None if absent."""
        for node in self.session['children']:
            if 'profile' in node['name']:
                return node['attributes']
        return None

    def fix_text(self, text):
        """Insert ' : ' after a leading '<word> <number>' prefix of a
        marker comment (e.g. 'Chapitre 2 debut' -> 'Chapitre 2 : debut').
        Text that does not match is returned unchanged."""
        try:
            words = text.split(' ')
            int(words[1])  # only fix when the second token is a number
            words.insert(2, ':')
            return ' '.join(words)
        except Exception:
            return text

    def markers(self, offset=0, from_first_marker=False):
        """ by default return a dict of markers with timecodes relative to an origin

            if from_first_marker=False: the origin is the first entry timecode
            if from_first_marker=True: the origin is the first entry timecode before the first marker

            offset: general origin offset
        """

        abs_time = 0
        markers = []
        i = 0
        entries = self.entries_video_seconds()

        for node in self.session['children']:
            if 'kdenlivedoc' in node['name']:

                for child in node['children']:
                    if 'markers' in child['name'] and 'children' in child:

                        for marker_node in child['children']:
                            if 'marker' in marker_node['name']:

                                # Marker times may use a decimal comma.
                                marker_time = float(marker_node['attributes']['time'].replace(',', '.'))
                                marker_id = marker_node['attributes']['id']
                                rel_time = 0

                                for entry in entries:
                                    if entry['in'] <= marker_time <= entry['out'] and marker_id == entry['id']:
                                        if i == 0 and from_first_marker:
                                            abs_time = entry['t']
                                        # Position on the edited timeline.
                                        rel_time = entry['t'] + (marker_time - entry['in']) - abs_time + offset
                                        break

                                marker_node['attributes']['time'] = rel_time
                                marker_node['attributes']['session_timecode'] = time.strftime('%H:%M:%S', time.gmtime(rel_time))
                                marker_node['attributes']['comment'] = self.fix_text(marker_node['attributes']['comment'])
                                markers.append(marker_node['attributes'])

                            i += 1
        return markers
+
diff --git a/scripts/old/crem_checker.py b/scripts/old/crem_checker.py
new file mode 100755 (executable)
index 0000000..9b5088a
--- /dev/null
@@ -0,0 +1,340 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Vérifier que les nouvelles cotes d'item :
+
+- correspondent toutes à la collection décrite par le fichier .csv
+  (le fichier .csv est nommé d'après la nouvelle cote de collection)
+
+- sont uniques dans le fichiers .csv
+
+- ont un des formats suivant :
+    - soit CNRSMH_I_aaaa_nnn_mmm
+    - soit CNRSMH_I_aaaa_nnn_mmm_tt
+    - soit CNRSMH_I_aaaa_nnn_mmm_tt_pp
+    - soit CNRSMH_E_aaaa_nnn_mmm_tt
+    - soit CNRSMH_E_aaaa_nnn_mmm_tt_pp
+
+- correspondent à fichier .wav (et qu'il n'y a pas de fichiers .wav
+  supplémentaire)
+
+Vérifier que le répertoire est nommé d'après la nouvelle cote de collection
+
+Vérifier que la nouvelle cote de collection a l'un des formats suivant :
+    - soit CNRSMH_I_aaaa_nnn
+    - soit CNRSMH_E_aaaa_nnn_mmm
+
+Vérifier que les fichiers .wav sont lisibles, ont une durée et sont identifiés
+comme WAV par audiolab.
+"""
+
+
+import os
+import re
+import sys
+import csv
+import xlrd
+import datetime
+import logging
+import shutil
+
# Legacy collection/item reference formats, each mapped to the regex that
# recognises it.  Groups: prefix, year (aaaa), collection (nnn), item (mmm)…
COLLECTION_OLD_PATTERN = [
        { 'format': 'BM.aaa.nnn.mmm',           'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'BM.aaaa.nnn.mmm/pp',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaaa.nnn.mmm',          'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'BM.aaaa.nnn.mmm/',         'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/$'},
        { 'format': 'BM.aaaa.nnn.mmm/ppp',      'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
        { 'format': 'BM.aaaa.nnn.mm/pp',        'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{2})/[0-9]{2}$'},
        { 'format': 'BM.aaaa.nnn',              'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})$'},
        { 'format': 'BM.aaa.nnn.mmm/pp',        'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaa.nnn FANTOME',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3}) FANTOME$'},
        { 'format': 'BM.aaa.nnn',               'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'BM.aaa.nnnBISoo/pp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})BIS([0-9]{2})/[0-9]{2}$'},
        { 'format': 'BM.aaa.nnn.mmm.ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})\.[0-9]{3}$'},
        { 'format': 'BM.aaa.nnn.mmm/ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
        { 'format': 'BM.aaa.nnn/pp',            'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaa.nnn-BIS.ooo/pp',    'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})-BIS\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaaa.nnn.mmm/NN',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/NN$'},
        { 'format': 'BM.aaa.nnn.mmm/pp-DEPOT',  'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}-DEPOT$'},
        { 'format': 'BM.aaa.nnn.mmm-o>p',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]>[0-9]$'},
        { 'format': 'CY.aaaa.nnn',              'regex': r'^(CY)\.([0-9]{4})\.([0-9]{3})$'},
        { 'format': 'DI.aaaa.nnn.mmm',          'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'DI.aaaa.nnn.mmm/pp',       'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'DI.aaa.nnn.mmm',           'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'DI.aaa.nnn.mmm/pp',        'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'DI.aaa.nnn.mmm-o/p',       'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]/[0-9]$'},
        { 'format': 'FANTOME 2*',               'regex': r'FANTOME 2\*$'},

        ## yomguy
        # NOTE(review): format says 'mm' (2 digits) but the regex matches
        # 3 digits for the last group -- confirm which is intended.
        { 'format': 'BM.aaaa.nnn.mm',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
        #{ 'format': 'BM.aaaa.nnn.mmm/pp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
        #{ 'format': 'BM.aaaa.nnn.mmm/ppp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
        #{ 'format': 'BM.aaaa.nnn.mmm:ii-jj',    'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3}):([0-9]{2})\-([0-9]{2})$'},
        ]

# Valid formats for NEW item references (CNRSMH naming scheme); see the
# module docstring for the meaning of each segment.
ITEM_NEW_PATTERN = [
        { 'format': 'CNRSMH_I_aaaa_nnn_mmm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})_([0-9]{2})$'},
        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2,3})_([0-9]{2})$'},

        # yomguy
        { 'format': 'CNRSMH_I_aaaa_nnn_mm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{2})$'},
        ]

# Valid formats for NEW collection references (directory names).
COLLECTION_PATTERN = [
        { 'format': 'CNRSMH_I_aaaa_nnn',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})$'},
        { 'format': 'CNRSMH_E_aaaa_nnn_mmm',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
        ]
+
+
def check_name(patterns, name):
    """Match *name* against a list of {'format', 'regex'} patterns.

    Returns the re.Match object of the first matching pattern, or False
    when no pattern matches (the original could also return None; both
    are falsy so callers using ``if not check_name(...)`` are unaffected).
    """
    for pattern in patterns:
        match = re.match(pattern['regex'], name)
        if match:
            return match
    return False
+
+
class Logger:
    """Small wrapper around ``logging`` that writes timestamped INFO and
    ERROR lines to a file (Python 2: messages are UTF-8 byte strings)."""

    def __init__(self, file):
        # NOTE(review): the logger name 'myapp' is process-wide, so every
        # Logger instance adds another FileHandler to the same logger --
        # confirm only one instance is created per run.
        self.logger = logging.getLogger('myapp')
        self.hdlr = logging.FileHandler(file)
        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        self.hdlr.setFormatter(self.formatter)
        self.logger.addHandler(self.hdlr)
        self.logger.setLevel(logging.INFO)

    def write_info(self, prefix, message):
        # ``decode`` implies *message* is a UTF-8 encoded byte string.
        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))

    def write_error(self, prefix, message):
        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
class CremCollection:
    """A collection directory: holds its XLS description file and the WAV
    item files, discovered by listing the directory once."""

    def __init__(self, dir, logger):
        self.dir = dir
        # Collection name = last component of the directory path.
        self.dir_name = self.dir.split(os.sep)[-1]
        self.file_list = os.listdir(self.dir)
        self.logger = logger

    def xls_list(self):
        """Return the non-hidden .xls/.XLS files of the directory."""
        file_list = []
        for file in self.file_list:
            filename = os.path.basename(file)
            ext = os.path.splitext(file)[1]
            if not '.' == filename[0] and (ext == '.xls' or ext == '.XLS'):
                file_list.append(file)
        print file_list
        return file_list

    def wav_list(self):
        """Return the non-hidden .wav/.WAV files; hidden files are logged
        as warnings."""
        list = []
        for file in self.file_list:
            filename = os.path.basename(file)
            ext = os.path.splitext(file)[1]
            if not '.' == filename[0] and (ext == '.wav' or ext == '.WAV'):
                list.append(file)
            elif '.' == filename[0]:
                self.logger.write_error(file, 'Warning : fichier caché présent !')
        return list
+
+
class CremCSV:
    """Write rows to a semicolon-delimited CSV file.

    The underlying file handle is kept so the caller can release it
    explicitly through close().
    """

    def __init__(self, file):
        # Open the target file and wrap it in a ';'-delimited writer.
        self.csv_file = open(file, 'w')
        self.csv = csv.writer(self.csv_file, delimiter=';')

    def close(self):
        """Flush pending output and release the file handle."""
        self.csv_file.close()
+
class CremXLS:
    """Read old and new item references from the first sheet of an XLS
    file (old refs in column 0, new refs in column 1, data starting at
    row ``first_row``)."""

    def __init__(self, file):
        self.first_row = 8        # sheet row index where data begins
        self.original_col = 0     # column of the old references
        self.new_col = 1          # column of the new references
        self.book = xlrd.open_workbook(file)
        self.sheet = self.book.sheet_by_index(0)
        # NOTE: these assignments replace the bound methods with their
        # results; original_refs/new_refs are plain lists from here on.
        self.original_refs = self.original_refs()
        self.new_refs = self.new_refs()
        #print len(self.new_refs)
        # Skip leading rows until the first 'CNRS' new reference with a
        # non-empty old reference (header/garbage rows before the data).
        while True:
            if len(self.original_refs) == 0 or len(self.new_refs) == 0:
                break
            else:
                if not 'CNRS' in self.new_refs[0].encode('utf8') \
                 and not  self.original_refs[0].encode('utf8') == '':
                    self.original_refs = self.original_refs[1:]
                    self.new_refs = self.new_refs[1:]
                else:
                    break

        # Number of items described by the sheet.
        self.size = max(len(self.new_refs), len(self.original_refs))

    def original_refs(self):
        """Text cells of the old-reference column below the header rows."""
        col = self.sheet.col(self.original_col)
        list = []
        for cell in col[self.first_row:]:
            if cell.ctype == 1:  # xlrd ctype 1 == text cell
                list.append(cell.value)
        return list

    def new_refs(self):
        """Text cells of the new-reference column below the header rows."""
        col = self.sheet.col(self.new_col)
        list = []
        for cell in col[self.first_row:]:
            if cell.ctype == 1:
                list.append(cell.value)
        return list
+
+
class CremItemFile:
    """Holds one item's media path and its basic audio properties."""

    def __init__(self):
        self.media = ''  # path to the media file

    def set_media(self, media):
        self.media = media

    def properties(self):
        """Read the basic audio properties into instance attributes.

        NOTE(review): relies on ``self.audio_file``, which is never set
        anywhere in this class (presumably an audiolab sound-file object)
        -- confirm the caller assigns it before use.
        """
        self.frames = self.audio_file.get_nframes()
        self.samplerate = self.audio_file.get_samplerate()
        self.channels = self.audio_file.get_channels()
        self.format = self.audio_file.get_file_format()
        self.encoding = self.audio_file.get_encoding()
+
+
+class CremCheck:
+    """Validate a tree of CREM collection folders against their XLS metadata.
+
+    For each sub-directory of root_dir: checks the folder name, requires
+    exactly one XLS file, cross-checks the item references it lists with
+    the WAV files present, logs every problem through Logger, and writes
+    a per-collection CSV mapping old item refs to new ones.
+    """
+
+    def __init__(self, root_dir, log_file):
+        self.root_dir = root_dir
+        self.logger = Logger(log_file)
+        dir_list = os.listdir(self.root_dir)
+        # Keep every entry except hidden ones (leading dot).
+        # NOTE(review): 'list' and 'dir' shadow builtins; kept unchanged.
+        list = []
+        for dir in dir_list:
+           if not dir[0] == '.':
+               list.append(dir)
+        self.dir_list = list
+
+    def check_new_refs(self):
+        # NOTE(review): returns inside the first loop iteration, so at most
+        # one reference is checked, and 'self.new_refs' is never assigned
+        # by this class -- looks like dead code, confirm before relying on it.
+        for name in self.new_refs:
+            return check_name(ITEM_PATTERN, name)
+
+    def check(self):
+        """Run all checks for every collection folder (log + CSV output)."""
+        for dir in self.dir_list:
+            collection = CremCollection(self.root_dir + dir, self.logger)
+            msg = '************************ ' + collection.dir_name + ' ******************************'
+            self.logger.write_info(collection.dir, msg[:70])
+
+            xls_list = collection.xls_list()
+            wav_list = collection.wav_list()
+
+            # A collection is only processed when its folder name matches
+            # COLLECTION_PATTERN and it holds exactly one XLS file.
+            if not check_name(COLLECTION_PATTERN, dir):
+                self.logger.write_error(collection.dir, 'Le dossier de la collection est mal nommé -> SORTIE')
+            elif len(xls_list) == 0:
+                self.logger.write_error(collection.dir, 'PAS de fichier XLS dans le dossier collection -> SORTIE')
+            elif len(xls_list) > 1:
+                self.logger.write_error(collection.dir, 'Plusieurs fichiers XLS dans le dossier collection -> SORTIE')
+
+            else:
+                xls = CremXLS(self.root_dir + os.sep + dir + os.sep + xls_list[0])
+                self.logger.write_info(collection.dir, 'XLS : ' + xls_list[0] + ' - Feuille : ' + xls.sheet.name.encode('utf8'))
+                self.logger.write_info(collection.dir, 'Nombre d\'items détectés : ' + str(xls.size))
+                csv_file = CremCSV(self.root_dir + dir + os.sep + collection.dir_name + '.csv')
+
+                if len(wav_list) != xls.size:
+                    self.logger.write_error(collection.dir, \
+                    'Le nombre de références du fichier XLS (' + str(xls.size) + ') diffère du nombre de fichiers (' + str(len(wav_list)) + ')')
+
+                temp_list = []
+                item_file = CremItemFile()
+
+                # One pass per XLS row: validate old/new refs, uniqueness,
+                # naming pattern, collection membership and WAV presence.
+                for i in range(0,xls.size):
+                    error = False
+
+                    try:
+                        item_old = xls.original_refs[i]
+                        #self.logger.write_error(collection.dir, item_old)
+                    except:
+                        item_old = ''
+                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item est inexistante'
+                        self.logger.write_error(collection.dir, msg)
+                        error = True
+                        continue
+
+                    try:
+                        item = xls.new_refs[i]
+                        #self.logger.write_error(collection.dir, item)
+                    except:
+                        item = ''
+                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item est inexistante'
+                        self.logger.write_error(collection.dir, msg)
+                        error = True
+                        continue
+
+                    # Duplicate new refs within the same sheet are errors.
+                    if not item in temp_list:
+                        temp_list.append(item)
+                    else:
+                        msg =  'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' est multiple'
+                        self.logger.write_error(collection.dir, msg)
+                        error = True
+
+                    #if not check_name(ITEM_OLD_PATTERN, item_old):
+                        #msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item ' + item_old.encode('utf8') + ' est mal formatée'
+                        #self.logger.write_error(collection.dir, msg)
+
+                    if not check_name(ITEM_NEW_PATTERN, item):
+                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item ' + item.encode('utf8') + ' est mal formatée'
+                        self.logger.write_error(collection.dir, msg)
+                        error = True
+
+                    if not collection.dir_name in item:
+                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' ne correspond pas à celle de la collection'
+                        self.logger.write_error(collection.dir, msg)
+                        error = True
+
+                    # A WAV file named after the new ref must exist.
+                    name_wav = item.encode('utf8') + '.wav'
+                    if not name_wav in wav_list:
+                        self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'existe pas')
+                    else:
+                        item_file.set_media(collection.dir + os.sep + name_wav)
+                        #if not item_file.is_wav():
+                        #    self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'est pas valide')
+                        #    error = True
+
+                    # Only fully valid rows end up in the CSV mapping.
+                    if not error:
+                        csv_file.csv.writerow([xls.original_refs[i], xls.new_refs[i]])
+
+                csv_file.close()
+
+                # Also flag WAV files whose names don't match the pattern.
+                for filename in wav_list:
+                    if not check_name(ITEM_NEW_PATTERN, os.path.splitext(filename)[0]):
+                        self.logger.write_error(collection.dir, 'Le nom du fichier ' + str(os.path.splitext(filename)[0]) + ' est mal formaté')
+
+            msg = '********************************************************************************'
+            self.logger.write_info(collection.dir, msg[:70])
+
+
+def main():
+    """CLI entry point: <tool> <root_dir> <log_file>.
+
+    Runs the checks into <log_file>.tmp, archives a dated copy as
+    <log_file>-<date>.log, then moves the tmp report onto <log_file>.
+    """
+    log_file = sys.argv[-1]
+    root_dir = sys.argv[-2]
+    log_tmp = log_file+'.tmp'
+
+    c = CremCheck(root_dir, log_tmp)
+    c.check()
+
+    date = datetime.datetime.now().strftime("%x-%X").replace('/','_')
+    shutil.copy(log_tmp,log_file+'-'+date+'.log')
+    shutil.move(log_tmp,log_file)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/scripts/old/process-waveform-cgi.py b/scripts/old/process-waveform-cgi.py
new file mode 100755 (executable)
index 0000000..317878b
--- /dev/null
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2009-2010 Guillaume Pellerin <yomguy@parisson.com>
+
+# This file is part of TimeSide.
+
+# TimeSide is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+
+# TimeSide is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
+
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+
+# for python2.5
+
+version = '0.5'
+
+
+import os
+import sys
+import time
+import shutil
+import datetime
+import timeside
+
+# soon with python2.6
+#from multiprocessing import Process
+
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+import cgi
+fs = cgi.FieldStorage()
+
+
+orig_media_dir = '/mnt/awdiomusic/musicbase'
+project_dir = '/mnt/awdio'
+log_file = project_dir + '/logs/process.log'
+sys.path.append('/home/awdio/apps/telemeta-awdio')
+
+
+class GrapherScheme:
+    """Static configuration for the 'waveform_awdio' grapher: color
+    schemes, image geometry and processing options."""
+
+    def __init__(self):
+        self.color = 255
+        self.color_scheme = {
+            'waveform': [ # Four (R,G,B) tuples for three main color channels for the spectral centroid method
+                        (self.color,self.color,self.color)
+#                        (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,0,0)
+                        ],
+            # NOTE: Python 2 integer division in these RGB tuples
+            # (e.g. 58/4 -> 14).
+            'spectrogram': [
+                        (0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100), (224,224,44), (255,60,30), (255,255,255)
+                        ]}
+
+        # Grapher id
+        self.id = 'waveform_awdio'
+
+        # Width of the image
+        self.width = 1800
+
+        # Height of the image
+        self.height = 233
+
+        # Background color
+        self.bg_color = None
+
+        # Force computation. By default, the class doesn't overwrite existing image files.
+        self.force = False
+        
+        # Nb of threads
+        # FIXME: memory leak for > 1 !
+        self.threads = 1
+
+      
+# CGI handler: reads the 'file' field of the request, copies the original
+# media into the Telemeta items dir if needed, renders a waveform image
+# and analyzer XML into the cache, and registers the item in the 'awdio'
+# collection.  Responds with text/plain.
+class TelemetaPreprocessImport(object):
+
+    # NOTE(review): this class mixes indentation widths (7- vs 8-space
+    # levels, likely tabs vs spaces in the original file); lines are kept
+    # byte-identical here, but the code is fragile to edit and will not
+    # run under python -tt.
+    def __init__(self, root_dir, dest_dir, log_file):
+       from telemeta.cache import TelemetaCache as Cache
+       from telemeta.util.logger import Logger
+       self.media_item_dir = 'items'
+        self.root_dir = root_dir + 'items'
+        self.dest_dir = dest_dir
+        self.threads = 1
+        self.logger = Logger(log_file)
+        self.counter = 0
+        self.force = 0
+        self.cache = Cache(self.dest_dir)
+
+        # Grapher settings come from the GrapherScheme defined above.
+        self.scheme = GrapherScheme()
+        self.width = self.scheme.width
+        self.height = self.scheme.height
+        self.bg_color = self.scheme.bg_color
+        self.color_scheme = self.scheme.color_scheme
+        self.force = self.scheme.force
+        self.threads = self.scheme.threads
+        self.logger = Logger(log_file)
+        self.counter = 0
+        self.collection_name = 'awdio'
+        self.collection = self.set_collection(self.collection_name)
+        
+        self.analyzers = timeside.core.processors(timeside.api.IAnalyzer)
+        self.grapher = timeside.grapher.WaveformAwdio(width=self.width, 
+                                                         height=self.height, 
+                                                         bg_color=self.bg_color, 
+                                                         color_scheme=self.color_scheme)
+        
+
+    # Return the MediaCollection with the given code, creating and
+    # saving it first if it does not exist yet.
+    def set_collection(self, collection_name):
+        import telemeta.models
+        collections = telemeta.models.media.MediaCollection.objects.filter(code=collection_name)
+        if not collections:
+            c = telemeta.models.media.MediaCollection(code=collection_name)
+            c.title = collection_name
+            c.save()
+            msg = 'added'
+            self.logger.logger.info(collection_name, msg)
+            collection = c
+        else:
+            collection = collections[0]
+        return collection
+
+    # Handle one CGI request: expects a 'file' form field naming a media
+    # file available under orig_media_dir; writes a text/plain response.
+    def process(self):
+       import telemeta.models
+       keys = fs.keys()
+       if keys[0] == 'file':
+           filename = fs['file'].value
+           media_orig = orig_media_dir + os.sep + filename
+           media = self.root_dir + os.sep + filename
+           
+           # Copy the source file into the items dir on first sight.
+           if not os.path.exists(media):
+               shutil.copy(media_orig, media)
+               os.system('chmod 644 ' + media)
+            
+            name, ext = os.path.splitext(filename)
+            size = str(self.width) + '_' + str(self.height)
+            image_name = name + '.' + self.scheme.id + '.' + size + '.png'
+            image = self.dest_dir + os.sep + image_name
+            xml = name + '.xml'
+            
+            if not self.cache.exists(image_name) or not self.cache.exists(xml):
+                mess = 'Processing ' + media
+                self.logger.logger.info(mess)
+           
+               print "Content-type: text/plain\n"
+               print mess
+               decoder  = timeside.decoder.FileDecoder(media)
+               # Chain decoder -> grapher -> each analyzer into one pipe.
+               pipe = decoder | self.grapher
+               analyzers = []
+               analyzers_sub = []
+               for analyzer in self.analyzers:
+                   subpipe = analyzer()
+                   analyzers_sub.append(subpipe)
+                   pipe = pipe | subpipe
+               pipe.run()
+               
+               mess = 'Rendering ' + image
+               self.logger.logger.info(mess)
+               self.grapher.render(output=image)
+               
+               mess = 'Frames / Pixel = ' + str(self.grapher.graph.samples_per_pixel)
+               self.logger.logger.info(mess)
+               
+               # Collect analyzer results; 'duration' is converted to a
+               # timedelta for readability.
+               for analyzer in analyzers_sub:
+                   value = analyzer.result()
+                   if analyzer.id() == 'duration':
+                       value = datetime.timedelta(0,value)
+                   analyzers.append({'name':analyzer.name(),
+                           'id':analyzer.id(),
+                           'unit':analyzer.unit(),
+                           'value':str(value)})
+               
+               self.cache.write_analyzer_xml(analyzers, xml)
+               
+               item = telemeta.models.media.MediaItem.objects.filter(code=name)
+                           
+               if not item:
+                   item = telemeta.models.media.MediaItem(collection=self.collection, code=name)
+                   item.title = name
+                   item.file = self.media_item_dir + os.sep + filename
+                   item.save()
+                   msg = 'added item : ' + filename
+                   self.logger.logger.info(self.collection_name, msg)
+
+               # Drop references to the pipeline objects.
+               pipe = 0
+               decoder = 0
+               
+               print "OK"
+               
+               #except:
+                   #pipe = 0
+                   #decoder = 0
+                   #mess = 'Could NOT process : ' + media
+                   #self.logger.logger.error(mess)
+                   #print mess
+                   
+           else:
+               mess = "Nothing to do with file : " + media
+               self.logger.logger.info(mess)
+               print "Content-type: text/plain\n"
+               print mess
+       
+       else:
+           print "Content-type: text/plain\n"
+           print "No file given !"
+       
+
+if __name__ == '__main__':
+    # Bootstrap the Django project, resolve media/cache dirs from its
+    # settings, then handle the CGI request.
+    sys.path.append(project_dir)
+    import settings
+    setup_environ(settings)
+    media_dir = settings.MEDIA_ROOT
+    data_dir = settings.TELEMETA_DATA_CACHE_DIR
+    t = TelemetaPreprocessImport(media_dir, data_dir, log_file)
+    t.process()
diff --git a/scripts/old/telemeta-backup.py b/scripts/old/telemeta-backup.py
new file mode 100755 (executable)
index 0000000..0a31499
--- /dev/null
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2007 Samalyse SARL
+
+# This software is a computer program whose purpose is to backup, analyse,
+# transcode and stream any audio content with its metadata over a web frontend.
+
+# This software is governed by the CeCILL  license under French law and
+# abiding by the rules of distribution of free software.  You can  use,
+# modify and/ or redistribute the software under the terms of the CeCILL
+# license as circulated by CEA, CNRS and INRIA at the following URL
+# "http://www.cecill.info".
+
+# As a counterpart to the access to the source code and  rights to copy,
+# modify and redistribute granted by the license, users are provided only
+# with a limited warranty  and the software's author,  the holder of the
+# economic rights,  and the successive licensors  have only  limited
+# liability.
+
+# In this respect, the user's attention is drawn to the risks associated
+# with loading,  using,  modifying and/or developing or reproducing the
+# software by the user in light of its specific status of free software,
+# that may mean  that it is complicated to manipulate,  and  that  also
+# therefore means  that it is reserved for developers  and  experienced
+# professionals having in-depth computer knowledge. Users are therefore
+# encouraged to load and test the software's suitability as regards their
+# requirements in conditions enabling the security of their systems and/or
+# data to be ensured and,  more generally, to use and operate it in the
+# same conditions as regards security.
+
+# The fact that you are presently reading this means that you have had
+# knowledge of the CeCILL license and that you accept its terms.
+#
+# Author: Olivier Guilyardi <olivier@samalyse.com>
+
+import os
+import sys
+import time
+from django.core.management import setup_environ
+
+def print_usage(toolname):
+    """Print command-line usage for the backup tool on stdout."""
+    print "Usage: " + toolname + " <project_dir> <backup_dir>"
+    print "  project_dir: the directory of the Django project which hosts Telemeta"
+    print "  backup_dir: the destination backup folder (must exist)"
+
+def write_readme(dest_dir, coll_num):
+    """Write dest_dir/README with a GMT timestamp and the collection count.
+
+    NOTE(review): the file is not closed if a write raises (no
+    try/finally) -- acceptable for a one-shot script, but worth knowing.
+    """
+    readme = open(dest_dir + "/" + "README", "w")
+    timestr = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
+    readme.write("Telemeta Backup\n\n")
+    readme.write("- date: " + timestr + "\n")
+    readme.write("- number of collections: " + str(coll_num) + "\n\n")
+    readme.close()
+
+def backup(dest_dir):
+    """Serialize every MediaCollection under dest_dir.
+
+    Collections (ordered by id) are grouped into sub-folders of 100
+    ('collections-1-100', 'collections-101-200', ...), each created
+    lazily when its first collection is reached.
+    """
+    from telemeta.models import MediaCollection
+    from telemeta.backup import CollectionSerializer
+
+    collections = MediaCollection.objects.order_by('id')
+    count = collections.count()
+
+    print "Writing README file..",
+    write_readme(dest_dir, count)
+    print "Done."
+
+    i = 0
+    for collection in collections:
+        # Start a new batch folder every 100 collections (i == 0 on the
+        # first iteration, so set_dir is always defined before use).
+        if i % 100 == 0:
+            set_dir = dest_dir + ("/collections-%d-%d" % (i+1, i+100))
+            os.mkdir(set_dir)
+        i += 1
+        print "Processing collection %d/%d (%d%%) with id: %s.. " \
+            % (i, count, i*100/count, collection.id),
+        sys.stdout.flush()
+        serializer = CollectionSerializer(collection)
+        serializer.backup(set_dir)
+        print "Done"
+
+def run():
+    """CLI entry point: <tool> <project_dir> <backup_dir>.
+
+    Adds project_dir to sys.path, loads its Django settings, then backs
+    up all collections into backup_dir.
+    """
+    if len(sys.argv) != 3:
+        print_usage(os.path.basename(sys.argv[0]))
+        sys.exit(1)
+    else:
+        project_dir = sys.argv[1]
+        backup_dir = sys.argv[2]
+        sys.path.append(project_dir)
+        import settings
+        setup_environ(settings)
+        backup(backup_dir)
+
+if __name__ == '__main__':
+    run()
diff --git a/scripts/old/telemeta-crem-import-alt_ids.py b/scripts/old/telemeta-crem-import-alt_ids.py
new file mode 100755 (executable)
index 0000000..84c673d
--- /dev/null
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2011 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import sys
+import xlrd
+import logging
+import datetime
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
+class Logger:
+    """Small wrapper around the stdlib logging module that appends
+    INFO/ERROR lines to a single log file."""
+
+    def __init__(self, file):
+        # NOTE(review): 'file' shadows the Python 2 builtin; kept as-is.
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def write_info(self, prefix, message):
+        # message is expected to be a UTF-8 byte string (Python 2 str).
+        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
+
+    def write_error(self, prefix, message):
+        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
+class TelemetaAltIdsImport:
+    """Import alternate ids for collections from an XLS sheet.
+
+    Sheet layout: column 0 holds the collection old_code, columns 1..n
+    hold alternate ids; all text cells of a row are space-joined into
+    MediaCollection.alt_ids.
+    """
+
+    def __init__(self, xls_file, log_file):
+        self.logger = Logger(log_file)
+        self.xls = xls_file
+        # Row cursor; starts at 0 so the first processed row is row 1
+        # (the header row is skipped).
+        self.row = 0
+
+    def alt_ids_import(self):
+        """Walk every data row and save the collected alt_ids."""
+        from telemeta.models import MediaCollection
+        self.book = xlrd.open_workbook(self.xls)
+        self.sheet = self.book.sheet_by_index(0)
+        self.length = len(self.sheet.col(0))-1
+        
+        while True:
+            ids = []
+            self.row += 1
+            row = self.sheet.row(self.row)
+            if self.row == self.length:
+                break
+            collection_id = row[0].value
+            cell_alt_id = row[1]
+            # ctype == 1 means a text cell in xlrd.
+            if cell_alt_id.ctype == 1:
+                for i in range(1,len(row)):
+                    cell_alt_id = row[i]
+                    if cell_alt_id.ctype == 1:
+                        ids.append(cell_alt_id.value)
+                alt_ids = ' '.join(ids)
+                try:
+                    collection = MediaCollection.objects.get(old_code=collection_id)
+                    collection.alt_ids = alt_ids
+                    collection.save()
+                    print self.row, collection_id, alt_ids
+                except:
+                    # NOTE(review): bare except also hides unexpected
+                    # errors, not only the missing-collection case.
+                    msg = 'No collection found for this id'
+                    self.logger.write_error(collection_id, msg)
+                    continue
+            
+                
+def print_usage(tool_name):
+    """Print command-line usage for the alt_ids import tool."""
+    print "Usage: "+tool_name+" <project_dir> <xls_file> <log_file>"
+    print "  project_dir: the directory of the Django project which hosts Telemeta"
+    print "  xls_file: the excel file containing all collection alt_ids"
+
+def run():
+    """CLI entry point: <tool> <project_dir> <xls_file> <log_file>.
+
+    Bootstraps the Django project, then runs the alt_ids import.
+    """
+    if len(sys.argv) < 3:
+        print_usage(os.path.basename(sys.argv[0]))
+        sys.exit(1)
+    else:
+        # Arguments are taken from the end of argv.
+        project_dir = sys.argv[-3]
+        xls_file = sys.argv[-2]
+        log_file = sys.argv[-1]
+        sys.path.append(project_dir)
+        import settings
+        setup_environ(settings)
+        t = TelemetaAltIdsImport(xls_file, log_file)
+        t.alt_ids_import()
+
+if __name__ == '__main__':
+    run()
diff --git a/scripts/old/telemeta-crem-import-test.py b/scripts/old/telemeta-crem-import-test.py
new file mode 100755 (executable)
index 0000000..021e9a2
--- /dev/null
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2010 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import sys
+import csv
+import logging
+import datetime
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
+
+class Logger:
+    """Small wrapper around the stdlib logging module that appends
+    INFO/ERROR lines to a single log file."""
+
+    def __init__(self, file):
+        # NOTE(review): 'file' shadows the Python 2 builtin; kept as-is.
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def info(self, prefix, message):
+        # message is expected to be a UTF-8 byte string (Python 2 str).
+        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
+
+    def error(self, prefix, message):
+        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
+class TelemetaWavImport:
+    """Import WAV files from source_dir into Telemeta collections/items.
+
+    Each sub-folder of source_dir whose path matches 'pattern' is a
+    collection (folder name == collection code); each WAV basename is an
+    item code.  An optional '<collection>.csv' maps new codes (col 1) to
+    old codes (col 0).  All actions are logged via Logger.
+    """
+
+    def __init__(self, source_dir, log_file, pattern, domain):
+        from django.contrib.auth.models import User
+        self.logger = Logger(log_file)
+        self.source_dir = source_dir
+        self.collections = os.listdir(self.source_dir)
+        self.pattern = pattern
+        # Revisions are attributed to the 'admin' user.
+        self.user = User.objects.filter(username='admin')[0]
+        self.domain = domain
+
+    def write_file(self, item, wav_file, overwrite=False):
+        """Register wav_file on item (save + revision) or log an error.
+
+        NOTE(review): the actual file copy below is commented out in this
+        test variant -- only item.save() and set_revision() happen.
+        """
+        filename = wav_file.split(os.sep)[-1]
+        if os.path.exists(wav_file):
+            if not item.file or overwrite:
+#                f = open(wav_file, 'r')
+#                file_content = ContentFile(f.read())
+#                item.file.save(filename, file_content)
+#                f.close()
+                item.save()
+                item.set_revision(self.user)
+            else:
+                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
+                self.logger.error('item', msg)
+        else:
+            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
+            self.logger.error('item', msg)
+            
+    def wav_import(self):
+        """Two-pass import: validate/create collections, then items.
+
+        Pass 1 checks every matching folder against the database (and
+        exits when a collection that ships a CSV is missing from the DB).
+        Pass 2 reads the optional CSV, ensures the collection row exists,
+        then creates or updates one item per WAV file depending on how
+        many items the collection already has.
+        """
+        from telemeta.models import MediaItem,  MediaCollection
+        
+        collections = []
+        for collection in self.collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_files = os.listdir(collection_dir)
+            
+            
+            if not '/.' in collection_dir and self.pattern in collection_dir:
+                collection_name = collection.split(os.sep)[-1]
+                collections.append(collection_name)
+                c = MediaCollection.objects.filter(code=collection_name)
+                
+                if not c and collection + '.csv' in collection_files:
+                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
+                    self.logger.error(collection, msg)
+                    sys.exit(msg)
+                elif not c:
+                    msg = 'collection NON présente dans la base de données, CREATION '
+                    self.logger.info(collection, msg)
+                    c = MediaCollection(code=collection_name)
+                    c.save()
+                    c.set_revision(self.user)
+                else:
+                    msg = 'collection présente dans la base de données, SELECTION'
+                    self.logger.info(collection, msg)
+                    
+        for collection in collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_name = collection
+            collection_files = os.listdir(collection_dir)
+            msg = '************************ ' + collection + ' ******************************'
+            self.logger.info(collection, msg[:70])
+            overwrite = True
+            csv_file = ''
+            rows = {}
+            
+            # Optional CSV: maps new item code (col 1) -> old code (col 0).
+            if collection + '.csv' in collection_files:
+                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
+                csv_data = csv.reader(open(csv_file), delimiter=';')
+                for row in csv_data:
+                    rows[row[1].strip()] = row[0].strip()
+                msg = collection + ' import du fichier CSV de la collection'
+                self.logger.info(collection, msg[:70])
+            else:
+                msg = collection + ' pas de fichier CSV dans la collection'
+                self.logger.info(collection, msg[:70])
+            
+            c = MediaCollection.objects.filter(code=collection_name)
+            if not c:
+                c = MediaCollection(code=collection_name)
+                c.save()
+                msg = ' collection NON présente dans la BDD, CREATION '
+                self.logger.info(c.code, msg)
+            else:
+                c = c[0]
+                msg = ' id = '+str(c.id)
+                self.logger.info(c.code, msg)
+            
+            audio_files = []
+            for file in collection_files:
+                ext = ['WAV', 'wav']
+                if file.split('.')[-1] in ext:
+                    audio_files.append(file)
+            
+            audio_files.sort()
+            nb_items = c.items.count()
+            counter = 0
+            
+            for file in audio_files:
+                code = file.split('.')[0]
+                wav_file = self.source_dir + os.sep + collection + os.sep + file
+                
+                # Cas 1/2: at most as many files as existing DB items ->
+                # match by code first, then by old_code from the CSV.
+                if len(audio_files) <= nb_items:
+                    items = MediaItem.objects.filter(code=code)
+                    
+                    old_ref = ''
+                    if code in rows and not items:
+                        old_ref = rows[code]
+                        items = MediaItem.objects.filter(old_code=old_ref)
+                        
+                    if items:
+                        item = items[0]
+                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
+                        self.logger.info('item', msg)
+                        item.code = code
+                        item.save()
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+                    
+                    self.write_file(item, wav_file, overwrite)
+                    
+                # Cas 3a: a single DB item but several files -> keep the
+                # first file, create items for the rest.
+                elif nb_items == 1 and len(audio_files) > 1:
+                    if counter == 0:
+                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
+                        self.logger.info('item', msg)
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+                        self.write_file(item, wav_file, overwrite)
+                
+                # Cas 3b: several DB items but fewer than files ->
+                # ambiguous, nothing is created.
+                elif nb_items > 1 and nb_items < len(audio_files):
+                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
+                    self.logger.info('item', msg)
+
+                counter += 1
+        
+        msg = 'Liste des URLs des collections importées :'
+        self.logger.info('INFO', msg)
+        for collection in collections:
+            msg = 'http://'+self.domain+'/collections/'+collection
+            self.logger.info(collection, msg)
+            
+        
+def print_usage(tool_name):
+    """Print command-line usage for the WAV import tool."""
+    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
+    print "  project_dir: the directory of the Django project which hosts Telemeta"
+    print "  source_dir: the directory containing the wav files to include"
+    print "  pattern: a pattern to match the collection names"
+    print "  log_file: a log file to write logs"
+    print "  domain: root domain for collections"
+
+def run():
+    """CLI entry point:
+    <tool> <project_dir> <source_dir> <pattern> <log_file> <domain>.
+
+    Bootstraps the Django project, then runs the WAV import.
+    """
+    if len(sys.argv) < 3:
+        print_usage(os.path.basename(sys.argv[0]))
+        sys.exit(1)
+    else:
+        # Arguments are taken from the end of argv.
+        project_dir = sys.argv[-5]
+        source_dir = sys.argv[-4]
+        pattern = sys.argv[-3]
+        log_file = sys.argv[-2]
+        url = sys.argv[-1]
+        sys.path.append(project_dir)
+        import settings
+        setup_environ(settings)
+        t = TelemetaWavImport(source_dir, log_file, pattern, url)
+        t.wav_import()
+
+if __name__ == '__main__':
+    run()
diff --git a/scripts/old/telemeta-crem-import.py b/scripts/old/telemeta-crem-import.py
new file mode 100755 (executable)
index 0000000..dcdf5c1
--- /dev/null
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2010 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import sys
+import csv
+import logging
+import datetime
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
+
+class Logger:
+    """File-backed logger; messages are expected to be UTF-8 byte
+    strings (Python 2) and are decoded before logging."""
+
+    def __init__(self, file):
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def info(self, prefix, message):
+        # message is a utf-8 byte string; decode it for the logging module.
+        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
+
+    def error(self, prefix, message):
+        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
+class TelemetaWavImport:
+    # Imports a tree of per-collection WAV directories into Telemeta:
+    # one sub-directory of source_dir per collection, optionally with a
+    # "<collection>.csv" file mapping new item codes to old ones.
+
+    def __init__(self, source_dir, log_file, pattern, domain):
+        # Deferred import: Django settings are only configured in run().
+        from django.contrib.auth.models import User
+        self.logger = Logger(log_file)
+        self.source_dir = source_dir
+        self.collections = os.listdir(self.source_dir)
+        self.pattern = pattern
+        # Revisions are attributed to the 'admin' account.
+        # NOTE(review): raises IndexError if no 'admin' user exists.
+        self.user = User.objects.filter(username='admin')[0]
+        self.domain = domain
+
+    def write_file(self, item, wav_file, overwrite=False):
+        """Attach the audio file at wav_file to item, unless the item
+        already has a file and overwrite is False."""
+        filename = wav_file.split(os.sep)[-1]
+        if os.path.exists(wav_file):
+            if not item.file or overwrite:
+                # NOTE(review): the whole file is read into memory, and a
+                # binary file is opened in text mode 'r' -- confirm 'rb'.
+                f = open(wav_file, 'r')
+                file_content = ContentFile(f.read())
+                item.file.save(filename, file_content)
+                f.close()
+                item.save()
+                item.set_revision(self.user)
+            else:
+                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
+                self.logger.error('item', msg)
+        else:
+            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
+            self.logger.error('item', msg)
+
+    def wav_import(self):
+        """Walk source_dir and create/update collections and their items."""
+        from telemeta.models import MediaItem,  MediaCollection
+
+        # First pass: select or create the MediaCollection records.
+        collections = []
+        for collection in self.collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_files = os.listdir(collection_dir)
+
+
+            # Skip hidden paths; only keep directories matching the pattern.
+            if not '/.' in collection_dir and self.pattern in collection_dir:
+                collection_name = collection.split(os.sep)[-1]
+                collections.append(collection_name)
+                c = MediaCollection.objects.filter(code=collection_name)
+
+                # A CSV mapping requires the collection to pre-exist: abort.
+                if not c and collection + '.csv' in collection_files:
+                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
+                    self.logger.error(collection, msg)
+                    sys.exit(msg)
+                elif not c:
+                    msg = 'collection NON présente dans la base de données, CREATION '
+                    self.logger.info(collection, msg)
+                    c = MediaCollection(code=collection_name, title=collection_name)
+                    c.save()
+                    c.set_revision(self.user)
+                else:
+                    msg = 'collection présente dans la base de données, SELECTION'
+                    self.logger.info(collection, msg)
+
+        # Second pass: import the audio files of each retained collection.
+        for collection in collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_name = collection
+            collection_files = os.listdir(collection_dir)
+            msg = '************************ ' + collection + ' ******************************'
+            self.logger.info(collection, msg[:70])
+            overwrite = True
+            csv_file = ''
+            rows = {}
+
+            # Optional CSV file: maps new code (column 2) -> old code (column 1).
+            if collection + '.csv' in collection_files:
+                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
+                csv_data = csv.reader(open(csv_file), delimiter=';')
+                for row in csv_data:
+                    rows[row[1].strip()] = row[0].strip()
+                msg = collection + ' import du fichier CSV de la collection'
+                self.logger.info(collection, msg[:70])
+            else:
+                msg = collection + ' pas de fichier CSV dans la collection'
+                self.logger.info(collection, msg[:70])
+
+            c = MediaCollection.objects.filter(code=collection_name)
+            if not c:
+                c = MediaCollection(code=collection_name)
+                c.save()
+                msg = ' collection NON présente dans la BDD, CREATION '
+                self.logger.info(c.code, msg)
+            else:
+                c = c[0]
+                msg = ' id = '+str(c.id)
+                self.logger.info(c.code, msg)
+
+            # Collect the .wav/.WAV files (ignoring hidden files), sorted.
+            audio_files = []
+            for file in collection_files:
+                ext = ['WAV', 'wav']
+                if file.split('.')[-1] in ext and file[0] != '.':
+                    audio_files.append(file)
+
+            audio_files.sort()
+            nb_items = c.items.count()
+            counter = 0
+
+            for file in audio_files:
+                code = file.split('.')[0]
+                wav_file = self.source_dir + os.sep + collection + os.sep + file
+
+                # Case 1/2: at least as many items as files -> match items
+                # by code, or by old code via the CSV, then attach the file.
+                if len(audio_files) <= nb_items:
+                    items = MediaItem.objects.filter(code=code)
+
+                    old_ref = ''
+                    if code in rows and not items:
+                        old_ref = rows[code]
+                        items = MediaItem.objects.filter(old_code=old_ref)
+
+                    if items:
+                        item = items[0]
+                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
+                        self.logger.info('item', msg)
+                        item.code = code
+                        item.save()
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+
+                    self.write_file(item, wav_file, overwrite)
+
+                # Case 3a: a single existing item but several files -> skip
+                # the first file, create items for the remaining ones.
+                elif nb_items == 1 and len(audio_files) > 1:
+                    if counter == 0:
+                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
+                        self.logger.info('item', msg)
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+                        self.write_file(item, wav_file, overwrite)
+
+                # Case 3b: ambiguous (more files than items) -> do nothing.
+                elif nb_items > 1 and nb_items < len(audio_files):
+                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
+                    self.logger.info('item', msg)
+
+                counter += 1
+
+        # Log the public URL of every imported collection.
+        msg = 'Liste des URLs des collections importées :'
+        self.logger.info('INFO', msg)
+        for collection in collections:
+            msg = 'http://'+self.domain+'/archives/collections/'+collection
+            self.logger.info(collection, msg)
+
+
+def print_usage(tool_name):
+    """Print command-line usage for this import script."""
+    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
+    print "  project_dir: the directory of the Django project which hosts Telemeta"
+    print "  source_dir: the directory containing the wav files to include"
+    print "  pattern: a pattern to match the collection names"
+    print "  log_file: a log file to write logs"
+    print "  domain: root domain for collections"
+
+def run():
+    """Parse CLI arguments, bootstrap Django and run the WAV importer."""
+    # NOTE(review): five arguments are consumed below (negative indices)
+    # but only ">= 3" is enforced here -- confirm the intended minimum.
+    if len(sys.argv) < 3:
+        print_usage(os.path.basename(sys.argv[0]))
+        sys.exit(1)
+    else:
+        project_dir = sys.argv[-5]
+        source_dir = sys.argv[-4]
+        pattern = sys.argv[-3]
+        log_file = sys.argv[-2]
+        url = sys.argv[-1]
+        # Make the Django project importable, then configure settings.
+        sys.path.append(project_dir)
+        import settings
+        setup_environ(settings)
+        t = TelemetaWavImport(source_dir, log_file, pattern, url)
+        t.wav_import()
+
+# Script entry point: run the importer when executed directly.
+if __name__ == '__main__':
+    run()
diff --git a/scripts/old/telemeta-media-link.py b/scripts/old/telemeta-media-link.py
new file mode 100755 (executable)
index 0000000..118fe95
--- /dev/null
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2010 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import re
+import sys
+import logging
+import datetime
+import timeside
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
+# Map decoder metadata tag names to MediaItem field names.  Several tags
+# ('encoder', 'audio-codec', 'container-format') all map to the free-text
+# 'comment' field and are appended rather than overwritten (see
+# media_import below).
+mapping = {
+             'title': 'title',
+             'album': 'collection',
+             'date': 'recorded_from_date',
+             'artist': 'author',
+             'track-number': 'track',
+             'encoder': 'comment',
+             'genre': 'generic_style',
+             'audio-codec': 'comment',
+             'container-format': 'comment',
+             }
+
+class Logger:
+    """File-backed logger; messages are expected to be UTF-8 byte
+    strings (Python 2) and are decoded before logging."""
+
+    def __init__(self, file):
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def write_info(self, prefix, message):
+        # message is a utf-8 byte string; decode it for the logging module.
+        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
+
+    def write_error(self, prefix, message):
+        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
+class TelemetaMediaImport:
+    """Scan a media directory and create a MediaItem (and, from the
+    'album' tag, its MediaCollection) for every media file not already
+    in the database, using metadata decoded by TimeSide."""
+
+    def __init__(self, media_dir, log_file):
+        self.logger = Logger(log_file)
+        self.media_dir = media_dir
+        self.medias = self.get_medias()
+    
+    def get_medias(self):
+        """List all non-hidden files below media_dir (paths relative to it)."""
+        os.chdir(self.media_dir)
+        file_list = []
+        for root, dirs, files in os.walk('.'):
+            for file in files:
+                path = root + os.sep + file
+                if not os.sep+'.' in path:
+                    file_list.append({'root': root, 'file': file})
+        return file_list
+        
+    def set_collection(self, collection_name):
+        """Return the MediaCollection for collection_name, creating it
+        with a sanitized code when missing."""
+        if not collection_name:
+            collection_name = 'Unkown'
+        code = collection_name.replace(' ','_')
+        code = re.sub(r'\W+', '_', code)
+        from telemeta.models.media import MediaCollection
+        collections = MediaCollection.objects.filter(code=code)
+        if not collections:
+            collection = MediaCollection(code=code,title=collection_name)
+            collection.save()
+            msg = 'created'
+            self.logger.write_info('collection ' + collection_name, msg)
+        else:
+            collection = collections[0]
+        return collection
+        
+    def media_import(self):
+        """Import every file whose code (base name) is not yet in the DB."""
+        from telemeta.models.media import MediaItem
+        for media in self.medias:
+            path = media['root'] + os.sep + media['file']
+            print 'checking ' + path
+            filename,  ext = os.path.splitext(media['file'])
+            item = MediaItem.objects.filter(code=filename)
+            if not item:
+                print 'importing ' + path
+                decoder = timeside.decoder.FileDecoder(path)
+                try:
+                    metadata = decoder.metadata()
+                    print metadata
+                    collection = self.set_collection(metadata['album'])
+                    item = MediaItem(collection=collection)
+                    item.code = re.sub(r'\W+', '_', metadata['title'])
+                    for tag in mapping.keys():
+                        try:
+                            # 'date' tags are assumed "<...> dd/mm/yyyy";
+                            # converted to ISO yyyy-mm-dd -- TODO confirm.
+                            if tag == 'date':
+                                date = metadata[tag].split(' ')[1].split('/')
+                                metadata[tag] = date[2]+'-'+date[1]+'-'+date[0]    
+                            # Comment-mapped tags are appended, not replaced.
+                            if mapping[tag] == 'comment':
+                                item[mapping[tag]] = item[mapping[tag]] + '\n' + metadata[tag]
+                            else:
+                                item[mapping[tag]] = metadata[tag]
+                        except:
+                            # NOTE(review): bare except silently skips any
+                            # tag that fails -- apparently best-effort.
+                            continue
+                    item.file = path
+                    item.save()
+                    msg = 'added item : ' + path
+                    self.logger.write_info(collection.code, msg)
+                except:
+                    # NOTE(review): bare except also hides decoder errors.
+                    continue
+                
+
+def run():
+    """Configure Django from the given project dir, then import MEDIA_ROOT.
+
+    Usage: telemeta-media-link.py <project_dir> <log_file>
+    """
+    project_dir = sys.argv[-2]
+    log_file = sys.argv[-1]
+    sys.path.append(project_dir)
+    import settings
+    setup_environ(settings)
+    media_dir = settings.MEDIA_ROOT
+    t = TelemetaMediaImport(media_dir, log_file)
+    t.media_import()
+
+# Script entry point: run the importer when executed directly.
+if __name__ == '__main__':
+    run()
diff --git a/scripts/sql/backup_db.sh b/scripts/sql/backup_db.sh
new file mode 100755 (executable)
index 0000000..b28c0f4
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+NOW=$(date +"%T-%m-%d-%Y")
+mysqldump -hdb -uroot -pmysecretpassword telemeta | gzip > /srv/backup/telemeta-$NOW.sql.gz
diff --git a/scripts/sql/convert_myisam_to_innodb.sql b/scripts/sql/convert_myisam_to_innodb.sql
new file mode 100644 (file)
index 0000000..91e36d5
--- /dev/null
@@ -0,0 +1,83 @@
+ALTER TABLE `vernacular_styles` ENGINE=InnoDB;
+ALTER TABLE `users` ENGINE=InnoDB;
+ALTER TABLE `thumbnail_kvstore` ENGINE=InnoDB;
+ALTER TABLE `telemeta_media_transcoded` ENGINE=InnoDB;
+ALTER TABLE `tape_width` ENGINE=InnoDB;
+ALTER TABLE `tape_wheel_diameter` ENGINE=InnoDB;
+ALTER TABLE `tape_vendor` ENGINE=InnoDB;
+ALTER TABLE `tape_speed` ENGINE=InnoDB;
+ALTER TABLE `tape_length` ENGINE=InnoDB;
+ALTER TABLE `south_migrationhistory` ENGINE=InnoDB;
+ALTER TABLE `search_criteria` ENGINE=InnoDB;
+ALTER TABLE `searches_criteria` ENGINE=InnoDB;
+ALTER TABLE `searches` ENGINE=InnoDB;
+ALTER TABLE `rights` ENGINE=InnoDB;
+ALTER TABLE `revisions` ENGINE=InnoDB;
+ALTER TABLE `recording_contexts` ENGINE=InnoDB;
+ALTER TABLE `publishing_status` ENGINE=InnoDB;
+ALTER TABLE `publisher_collections` ENGINE=InnoDB;
+ALTER TABLE `publishers` ENGINE=InnoDB;
+ALTER TABLE `profiles` ENGINE=InnoDB;
+ALTER TABLE `playlist_resources` ENGINE=InnoDB;
+ALTER TABLE `playlists` ENGINE=InnoDB;
+ALTER TABLE `physical_formats` ENGINE=InnoDB;
+ALTER TABLE `original_format` ENGINE=InnoDB;
+ALTER TABLE `original_channel_number` ENGINE=InnoDB;
+ALTER TABLE `organization` ENGINE=InnoDB;
+ALTER TABLE `metadata_writers` ENGINE=InnoDB;
+ALTER TABLE `metadata_authors` ENGINE=InnoDB;
+ALTER TABLE `media_type` ENGINE=InnoDB;
+ALTER TABLE `media_transcoding` ENGINE=InnoDB;
+ALTER TABLE `media_status` ENGINE=InnoDB;
+ALTER TABLE `media_parts` ENGINE=InnoDB;
+ALTER TABLE `media_markers` ENGINE=InnoDB;
+ALTER TABLE `media_item_related` ENGINE=InnoDB;
+ALTER TABLE `media_item_performances` ENGINE=InnoDB;
+ALTER TABLE `media_item_keywords` ENGINE=InnoDB;
+ALTER TABLE `media_item_identifier` ENGINE=InnoDB;
+ALTER TABLE `media_items` ENGINE=InnoDB;
+ALTER TABLE `media_formats` ENGINE=InnoDB;
+ALTER TABLE `media_fonds_related` ENGINE=InnoDB;
+ALTER TABLE `media_fonds_children` ENGINE=InnoDB;
+ALTER TABLE `media_fonds` ENGINE=InnoDB;
+ALTER TABLE `media_corpus_related` ENGINE=InnoDB;
+ALTER TABLE `media_corpus_children` ENGINE=InnoDB;
+ALTER TABLE `media_corpus` ENGINE=InnoDB;
+ALTER TABLE `media_collection_related` ENGINE=InnoDB;
+ALTER TABLE `media_collection_identifier` ENGINE=InnoDB;
+ALTER TABLE `media_collections` ENGINE=InnoDB;
+ALTER TABLE `media_analysis` ENGINE=InnoDB;
+ALTER TABLE `location_types` ENGINE=InnoDB;
+ALTER TABLE `location_relations` ENGINE=InnoDB;
+ALTER TABLE `location_aliases` ENGINE=InnoDB;
+ALTER TABLE `locations` ENGINE=InnoDB;
+ALTER TABLE `legal_rights` ENGINE=InnoDB;
+ALTER TABLE `languages` ENGINE=InnoDB;
+ALTER TABLE `jqchat_room` ENGINE=InnoDB;
+ALTER TABLE `jqchat_message` ENGINE=InnoDB;
+ALTER TABLE `ipauth_range` ENGINE=InnoDB;
+ALTER TABLE `instrument_relations` ENGINE=InnoDB;
+ALTER TABLE `instrument_alias_relations` ENGINE=InnoDB;
+ALTER TABLE `instrument_aliases` ENGINE=InnoDB;
+ALTER TABLE `instruments` ENGINE=InnoDB;
+ALTER TABLE `identifier_type` ENGINE=InnoDB;
+ALTER TABLE `googletools_siteverificationcode` ENGINE=InnoDB;
+ALTER TABLE `googletools_analyticscode` ENGINE=InnoDB;
+ALTER TABLE `generic_styles` ENGINE=InnoDB;
+ALTER TABLE `ethnic_group_aliases` ENGINE=InnoDB;
+ALTER TABLE `ethnic_groups` ENGINE=InnoDB;
+ALTER TABLE `django_site` ENGINE=InnoDB;
+ALTER TABLE `django_session` ENGINE=InnoDB;
+ALTER TABLE `django_content_type` ENGINE=InnoDB;
+ALTER TABLE `django_admin_log` ENGINE=InnoDB;
+ALTER TABLE `copy_type` ENGINE=InnoDB;
+ALTER TABLE `context_keywords` ENGINE=InnoDB;
+ALTER TABLE `auth_user_user_permissions` ENGINE=InnoDB;
+ALTER TABLE `auth_user_groups` ENGINE=InnoDB;
+ALTER TABLE `auth_user` ENGINE=InnoDB;
+ALTER TABLE `auth_permission` ENGINE=InnoDB;
+ALTER TABLE `auth_message` ENGINE=InnoDB;
+ALTER TABLE `auth_group_permissions` ENGINE=InnoDB;
+ALTER TABLE `auth_group` ENGINE=InnoDB;
+ALTER TABLE `ad_conversions` ENGINE=InnoDB;
+ALTER TABLE `acquisition_modes` ENGINE=InnoDB;
diff --git a/scripts/sql/import_sql.sh b/scripts/sql/import_sql.sh
new file mode 100755 (executable)
index 0000000..10c0ca4
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+if [[ $# -ne 4 ]]; then
+       echo "Usage: $0 <username> <password> <database> </path/to/sql_file.sql.gz>"
+       exit 1
+fi
+
+echo "=> Starting MySQL Server"
+/usr/bin/mysqld_safe > /dev/null 2>&1 &
+PID=$!
+
+RET=1
+while [[ RET -ne 0 ]]; do
+    echo "=> Waiting for confirmation of MySQL service startup"
+    sleep 5
+    mysql -u"$1" -p"$2" -e "status" > /dev/null 2>&1
+RET=$?
+done
+
+echo "   Started with PID ${PID}"
+
+echo "=> Importing SQL file"
+gunzip -c "$4" | mysql -u"$1" -p"$2" "$3"
+
+echo "=> Stopping MySQL Server"
+mysqladmin -u"$1" -p"$2" shutdown
+
+echo "=> Done!"
diff --git a/scripts/sql/restore_db.sh b/scripts/sql/restore_db.sh
new file mode 100755 (executable)
index 0000000..8666bf8
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+file=$1
+
+if [[ $file == *".gz" ]]; then
+    echo 'ok'
+    gunzip < /srv/backup/$file | mysql -hdb -uroot -pmysecretpassword telemeta
+else
+    mysql -hdb -uroot -pmysecretpassword telemeta < /srv/backup/$file
+fi
diff --git a/scripts/transcode/create_thumbs.py b/scripts/transcode/create_thumbs.py
new file mode 100755 (executable)
index 0000000..dc3fd20
--- /dev/null
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+
+import os, sys, string
+import logging
+
+class Logger:
+    """A logging object"""
+
+    def __init__(self, file):
+        # File-backed logger; messages are written verbatim (no timestamp),
+        # which lets the log file double as the "already done" state file.
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+# --- Thumbnail generation -------------------------------------------------
+# Usage: create_thumbs.py [--force] <root_dir>
+# Walks root_dir and grabs one PNG frame (at 00:00:05) from each .webm file.
+log_file = 'thumbs.log'
+logger = Logger(log_file)
+root_dir = sys.argv[-1]
+args = sys.argv[1:-1]
+source_format = 'webm'
+done = []
+preview_tc = '00:00:05'
+
+# The log file doubles as a state file: thumbnails already produced in a
+# previous run are listed there and skipped unless --force is given.
+if os.path.exists(log_file):
+    f = open(log_file, 'r')
+    for line in f.readlines():
+        done.append(line[:-1])
+    f.close()
+
+for root, dirs, files in os.walk(root_dir):
+    for file in files:
+        path = os.path.abspath(root + os.sep + file)
+        name, ext = os.path.splitext(file)
+        if ext[1:] == source_format:
+            dest = os.path.abspath(root + os.sep + name + '.png')
+            if not dest in done or '--force' in args:
+                # NOTE(review): path/dest are not shell-quoted; file names
+                # containing spaces or shell metacharacters will break.
+                command = 'ffmpeg -ss '+ preview_tc + ' -i ' + path + '  -y ' + dest
+                os.system(command)
+                logger.logger.info(dest)
+
+print "DONE!"
diff --git a/scripts/transcode/remux_fix_media.py b/scripts/transcode/remux_fix_media.py
new file mode 100755 (executable)
index 0000000..39cfd9f
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+
+import os, sys, psutil
+import datetime
+from ebml.utils.ebml_data import *
+
+class FixCheckMedia(object):
+    """Remux broken webm files in place and regenerate their mp3
+    counterparts, using per-directory marker files (webm.fixed,
+    webm.tofix, mp3.fixed, mp3.tofix) to record progress between runs."""
+
+    def __init__(self, dir, tmp_dir):
+        self.dir = dir
+        self.tmp_dir = tmp_dir
+        if not os.path.exists(self.tmp_dir):
+            os.makedirs(self.tmp_dir)
+
+    def process(self):
+        """Walk self.dir and (re)process any directory lacking markers."""
+        webm_fixed_log = 'webm.fixed'
+        webm_tofix_log = 'webm.tofix'
+        mp3_fixed_log = 'mp3.fixed'
+        mp3_tofix_log = 'mp3.tofix'
+
+        for root, dirs, files in os.walk(self.dir):
+            for filename in files:
+                source = root + os.sep + filename
+                name = os.path.splitext(filename)[0]
+                ext = os.path.splitext(filename)[1][1:]
+
+                # Only consider non-empty webm sources.
+                if ext == 'webm' and os.path.getsize(source):
+                    dir_files = os.listdir(root)
+
+                    if not webm_fixed_log in dir_files:
+                        print source
+                        self.fix_webm(source)
+                        # Touch the "fixed" marker, drop the "tofix" one.
+                        f = open(root + os.sep + webm_fixed_log, 'w')
+                        f.close()
+                        if os.path.exists(root + os.sep + webm_tofix_log):
+                            os.remove(root + os.sep + webm_tofix_log)
+
+                    # Regenerate every sibling mp3 from the fixed webm.
+                    if mp3_tofix_log in dir_files or not mp3_fixed_log in dir_files:
+                        for file in dir_files:
+                            dest_ext = os.path.splitext(file)[1][1:]
+                            if dest_ext == 'mp3':
+                                dest = root + os.sep + file
+                                print dest
+                                self.fix_mp3(source, dest)
+                                f = open(root + os.sep + mp3_fixed_log, 'w')
+                                f.close()
+                                if os.path.exists(root + os.sep + mp3_tofix_log):
+                                    os.remove(root + os.sep + mp3_tofix_log)
+                                #break
+
+
+    def hard_fix_webm(self, path):
+        """Full re-encode of path (slow); overwrites the original file."""
+        # NOTE(review): tmp_file ends with a trailing space, so the file
+        # ffmpeg writes ("out.webm ") is not the file the unquoted mv
+        # moves back ("out.webm") -- verify before relying on this method.
+        try:
+            tmp_file = self.tmp_dir + 'out.webm '
+            command = 'ffmpeg -loglevel 0 -i "'+ path + '" -vcodec libvpx -vb 500k -acodec libvorbis -aq 7 -f webm -y "' + tmp_file + '" > /dev/null'
+            print command
+            os.system(command)
+            command = 'mv '  + tmp_file + path
+            os.system(command)
+        except:
+            pass
+
+
+    def fix_webm(self, path):
+        """Remux path (stream copy) and cut everything before the first
+        EBML cluster so the file starts on a clean boundary."""
+        try:
+            tmp_file = self.tmp_dir + 'out.webm'
+            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "' + path + '" -vcodec copy -acodec copy -f webm -y "' + tmp_file + '" > /dev/null'
+            print command
+            os.system(command)
+            ebml_obj = EBMLData(tmp_file)
+            offset = ebml_obj.get_first_cluster_seconds()
+            command = '/usr/local/bin/ffmpeg -loglevel 0 -ss ' + str(offset) + ' -i "' + tmp_file + '" -vcodec copy -acodec copy -f webm -y "' + path + '" > /dev/null'
+            print command
+            os.system(command)
+        except:
+            pass
+
+    def fix_mp3(self, source, path):
+        """Re-extract the audio track of source into the mp3 at path."""
+        try:
+            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "'+ source + '" -vn -aq 6 -y "' + path + '" > /dev/null'
+            print command
+            os.system(command)
+        except:
+            pass
+
+def get_pids(name, args=None):
+    """Get a process pid filtered by arguments and uid"""
+    pids = []
+    for proc in psutil.process_iter():
+        # NOTE(review): written against an old psutil API where cmdline
+        # and name were attributes; recent psutil makes them methods.
+        if proc.cmdline:
+            if name == proc.name:
+                if args:
+                    if args in proc.cmdline:
+                        pids.append(proc.pid)
+                else:
+                    pids.append(proc.pid)
+    return pids
+
+# --- Script entry ---------------------------------------------------------
+# Usage: remux_fix_media.py <media_dir> <tmp_dir>
+dir = sys.argv[-2]
+tmp_dir = sys.argv[-1]
+
+# Single-instance guard: abort when another python2.6 process is already
+# running this very script (e.g. from an overlapping cron run).
+path =  os.path.abspath(__file__)
+pids = get_pids('python2.6',args=path)
+
+print datetime.datetime.now()
+if len(pids) <= 1:
+    print 'starting process...'
+    f = FixCheckMedia(dir, tmp_dir)
+    f.process()
+    print 'process finished.\n'
+else:
+    print 'already started !\n'
+
diff --git a/scripts/transcode/transcode.py b/scripts/transcode/transcode.py
new file mode 100755 (executable)
index 0000000..efaa113
--- /dev/null
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+
+import os, sys, string
+import logging
+
+
+class Logger:
+    """A logging object"""
+
+    def __init__(self, file):
+        # File-backed logger; each message is prefixed with a timestamp.
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+
+class TelemetaTranscode(object):
+    """Batch-transcode every webm/mp4 found under a root directory into
+    the formats listed in dest_formats, unless a file of that format
+    already exists next to the source (or --force is given)."""
+
+    threads = 4
+    source_formats = ['webm', 'mp4']
+    # ffmpeg argument strings per destination format.
+    dest_formats = {
+                   'mp3' : '-vn -acodec libmp3lame -aq 6',
+                   'ogg' : '-vn -acodec libvorbis -aq 6',
+                   'mp4' : '-vcodec libx264 -threads ' + str(threads) + \
+                           ' -c:v libx264 -crf 17 -maxrate 1100k -bufsize 1835k -acodec libfaac -ab 96k',
+                   'png' : '',
+                   'webm' : '-vcodec libvpx -threads ' + str(threads) + \
+                           ' -c:v libvpx -crf 17 -b:v 1100k',
+                  }
+
+
+    def __init__(self, args):
+        """args: CLI arguments; last is the log file, second-to-last the
+        root directory, the rest are flags (--force, --dry-run)."""
+        self.args = args
+        self.log_file = args[-1]
+        self.root_dir = args[-2]
+        self.logger = Logger(self.log_file)
+
+
+    def get_ext_in_dir(self, extension, root):
+        """Return True if any file in root already has this extension."""
+        files = os.listdir(root)
+        exts = []
+        for f in files:
+            name, ext = os.path.splitext(f)
+            ext = ext[1:]
+            if not ext in exts:
+                exts.append(ext)
+        return extension in exts
+
+    def run(self):
+        """Walk root_dir and transcode each source file to missing formats."""
+        for root, dirs, files in os.walk(self.root_dir):
+            for file in files:
+                path = os.path.abspath(root + os.sep + file)
+                name, ext = os.path.splitext(file)
+                ext = ext[1:]
+                if ext in self.source_formats:
+                    for format, ffmpeg_args in self.dest_formats.iteritems():
+                        local_file = name + '.' + format
+                        dest = os.path.abspath(root + os.sep + local_file)
+                        local_files = os.listdir(root)
+                        if not (local_file in local_files or self.get_ext_in_dir(format, root)) or '--force' in self.args:
+                            # webm -> ogg can reuse the vorbis track as-is.
+                            if ext == 'webm' and format == 'ogg':
+                                ffmpeg_args = '-vn -acodec copy'
+                            command = 'ffmpeg -loglevel 0 -i "' + path + '" ' + ffmpeg_args + ' -y "' + dest + '"'
+                            self.logger.logger.info(command)
+                            if not '--dry-run' in self.args:
+                                os.system(command)
+                            else:
+                                print command
+
+
+# Script entry point: transcode with all CLI arguments after the program name.
+if __name__ == '__main__':
+    t = TelemetaTranscode(sys.argv[1:])
+    t.run()
diff --git a/telemeta/util/import_sql.sh b/telemeta/util/import_sql.sh
deleted file mode 100755 (executable)
index 10c0ca4..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-if [[ $# -ne 4 ]]; then
-       echo "Usage: $0 <username> <password> <database> </path/to/sql_file.sql.gz>"
-       exit 1
-fi
-
-echo "=> Starting MySQL Server"
-/usr/bin/mysqld_safe > /dev/null 2>&1 &
-PID=$!
-
-RET=1
-while [[ RET -ne 0 ]]; do
-    echo "=> Waiting for confirmation of MySQL service startup"
-    sleep 5
-    mysql -u"$1" -p"$2" -e "status" > /dev/null 2>&1
-RET=$?
-done
-
-echo "   Started with PID ${PID}"
-
-echo "=> Importing SQL file"
-gunzip -c "$4" | mysql -u"$1" -p"$2" "$3"
-
-echo "=> Stopping MySQL Server"
-mysqladmin -u"$1" -p"$2" shutdown
-
-echo "=> Done!"
diff --git a/telemeta/util/kdenlive/__init__.py b/telemeta/util/kdenlive/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/telemeta/util/kdenlive/auto_fade.py b/telemeta/util/kdenlive/auto_fade.py
deleted file mode 100755 (executable)
index c716a8c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#/usr/bin/python
-
-import sys
-from telemeta.util.kdenlive.fade import AutoFade
-
-path = sys.argv[-1]
-fade = AutoFade(path)
-data = fade.run()
-f = open(path, 'w')
-f.write(data)
-f.close()
diff --git a/telemeta/util/kdenlive/auto_fade_batch.py b/telemeta/util/kdenlive/auto_fade_batch.py
deleted file mode 100755 (executable)
index 2704776..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-
-import os, sys
-from telemeta.util.kdenlive.fade import AutoFade
-
-if __name__ == '__main__':
-    dir = sys.argv[-2]
-    ext = sys.argv[-1]
-
-    for filename in os.listdir(dir):
-        prefix, extension = os.path.splitext(filename)
-        path = dir + os.sep + filename
-        flag = path + '.faded'
-        if ext in extension and not os.path.exists(flag):
-            os.system('cp ' + path + ' ' + path + '.bak')
-            fade = AutoFade(path)
-            data = fade.run()
-            f = open(path, 'w')
-            f.write(data)
-            f.close()
-            os.system('touch ' + flag)
diff --git a/telemeta/util/kdenlive/fade.py b/telemeta/util/kdenlive/fade.py
deleted file mode 100644 (file)
index c590194..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2012-2013 Guillaume Pellerin <yomguy@parisson.com>
-
-# This file is part of TimeSide.
-
-# TimeSide is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-
-# TimeSide is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
-
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-
-
-from telemeta.util.xmltodict2 import *
-
-
-class AutoFade(object):
-    """ Automatically applies a fade in and a fade out trasitions between each segment of a KdenLive session.
-        Each video clip needs to be splitted into one video track and an audio one ("Split audio"),
-        so that an audio fade in/out is also applied.
-
-        MLT files are also supported.
-    """
-
-    def __init__(self, path, audio_frames_out=2, audio_frames_in=1,
-                       video_frames_out=3, video_frames_in=3):
-        self.audio_frames_in = audio_frames_in
-        self.audio_frames_out = audio_frames_out
-        self.video_frames_in = video_frames_in
-        self.video_frames_out = video_frames_out
-        self.path = path
-        self.session = xmltodict(self.path)
-
-    def audio_fade_out(self, frame_out):
-        child = {'attributes': {u'id': u'fadeout',
-        u'in': unicode(int(frame_out)-self.audio_frames_out),
-        u'out': unicode(frame_out)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'window'},
-         'cdata': '75',
-         'name': 'property'},
-        {'attributes': {u'name': u'max_gain'},
-         'cdata': '20dB',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fadeout',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'gain'}, 'cdata': '1', 'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-    def audio_fade_in(self, frame_in):
-        child = {'attributes': {u'id': u'fadein',
-        u'in': unicode(frame_in),
-        u'out': unicode(int(frame_in)+self.audio_frames_in)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'window'},
-         'cdata': '75',
-         'name': 'property'},
-        {'attributes': {u'name': u'max_gain'},
-         'cdata': '20dB',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fadein',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'gain'}, 'cdata': '0', 'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-
-    def video_fade_out(self, frame_out):
-        child = {'attributes': {u'id': u'fade_to_black',
-        u'in': unicode(int(frame_out)-self.video_frames_out),
-        u'out': unicode(frame_out)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'start'}, 'cdata': '1', 'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fade_to_black',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-
-    def video_fade_in(self, frame_in):
-        child = {'attributes': {u'id': u'fade_from_black',
-        u'in': unicode(frame_in),
-        u'out': unicode(int(frame_in)+self.video_frames_in)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'start'}, 'cdata': '0', 'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fade_from_black',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-    def run(self):
-        audio_count = 0
-        video_count = 0
-        
-        for attr in self.session['children']:
-            if 'playlist' in attr['name'] and 'children' in attr:
-                for att in attr['children']:
-                    if 'producer' in att['attributes'] and not 'children' in att:                        
-                        producer = att['attributes']['producer']
-                        if producer != 'black':
-                        
-                            frame_in = att['attributes']['in']
-                            frame_out = att['attributes']['out']
-
-                            if 'audio' in producer:
-                                if not audio_count % 2:
-                                    att['children'] = [self.audio_fade_out(frame_out)]
-                                else:
-                                    att['children'] = [self.audio_fade_in(frame_in)]
-                                audio_count += 1
-
-
-                            if 'video' in producer:
-                                if not video_count % 2:
-                                    att['children'] = [self.video_fade_out(frame_out)]
-                                else:
-                                    att['children'] = [self.video_fade_in(frame_in)]
-                                video_count += 1
-
-        return dicttoxml(self.session).encode('utf-8')
-
-
diff --git a/telemeta/util/kdenlive/mlt_fix_threads.sh b/telemeta/util/kdenlive/mlt_fix_threads.sh
deleted file mode 100755 (executable)
index 60b0061..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-#threads=$1
-dir=$1
-
-for file in `ls $dir/*.sh`; do
- perl -pi -e 's/threads=6/threads=4/g' $file
- perl -pi -e 's/threads=2/threads=4/g' $file
- perl -pi -e 's/threads=1/threads=4/g' $file
-done
diff --git a/telemeta/util/kdenlive/mlt_process_batch.py b/telemeta/util/kdenlive/mlt_process_batch.py
deleted file mode 100755 (executable)
index 7d346c6..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/python
-
-import os, sys
-
-if __name__ == '__main__':
-    root_dir = sys.argv[-1]
-
-    fading = False
-    if '--fading' in sys.argv:
-        fading = True
-
-    for root, dirs, files in os.walk(root_dir):
-        for filename in files:
-            prefix, extension = os.path.splitext(filename)
-            path = root + os.sep + filename
-
-            flag = path + '.processed'
-            if 'sh' in extension and not os.path.exists(flag):
-                if fading:
-                    from telemeta.util.kdenlive.fade import AutoFade
-                    local_files = os.listdir(root)
-                    for local_file in local_files:
-                        local_name, local_ext = os.path.splitext(local_file)
-                        if 'mlt' in local_ext:
-                            local_path = root + os.sep + local_file
-                            local_flag = local_path + '.faded'
-                            if not os.path.exists(local_flag):
-                                print 'fading :        ' + local_path 
-                                os.system('cp ' + local_path + ' ' + local_path + '.bak')
-                                fade = AutoFade(local_path)
-                                data = fade.run()
-                                f = open(local_path, 'w')
-                                f.write(data)
-                                f.close()
-                                os.system('touch ' + local_flag)
-
-                print 'processing :    ' + path
-                os.system('nice -n 19 ' + path)
-                os.system('touch ' + flag)
diff --git a/telemeta/util/kdenlive/session.py b/telemeta/util/kdenlive/session.py
deleted file mode 100644 (file)
index 74eb425..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2012-2013 Parisson SARL
-
-# This software is a computer program whose purpose is to backup, analyse,
-# transcode and stream any audio content with its metadata over a web frontend.
-
-# This software is governed by the CeCILL  license under French law and
-# abiding by the rules of distribution of free software.  You can  use,
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info".
-
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability.
-
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or
-# data to be ensured and,  more generally, to use and operate it in the
-# same conditions as regards security.
-
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-
-# Authors: Guillaume Pellerin <yomguy@parisson.com>
-
-
-import time
-from telemeta.util.xmltodict2 import *
-
-
-class KDEnLiveSession(object):
-
-    def __init__(self, path):
-        self.session = xmltodict(path)
-
-    def entries(self):
-        entries = []
-        for attr in self.session['children']:
-            if 'playlist' in attr['name'] and 'children' in attr:
-                for att in attr['children']:
-                    if 'entry' in att['name'] and att['attributes']['producer'] != 'black':
-                        entries.append(att['attributes'])
-        return entries
-
-    def video_entries(self):
-        entries = []
-        for attr in self.session['children']:
-            if 'playlist' in attr['name'] and 'children' in attr:
-                for att in attr['children']:
-                    if 'entry' in att['name'] and att['attributes']['producer'] != 'black' \
-                            and not 'audio' in att['attributes']['producer']:
-                        entries.append(att['attributes'])
-        return entries
-
-    def entries_sorted(self):
-        return sorted(self.entries(), key=lambda k: int(k['in']), reverse=False)
-
-    def entries_video_seconds(self):
-        fps = float(self.profile()['frame_rate_num'])
-        list = []
-        entries = self.video_entries()
-        for i in range(0,len(entries)):
-            id = entries[i]['producer'].split('_')[0]
-            t_in = int(entries[i]['in'])/fps
-            t_out = int(entries[i]['out'])/fps
-
-            if i == 0:
-                t = 0
-            else:
-                t = list[i-1]['t'] + int(entries[i-1]['out'])/fps - int(entries[i-1]['in'])/fps
-
-            list.append({'id' : id, 't': t, 'in': t_in , 'out': t_out })
-
-        return list
-
-    def cuts(self, entries):
-        i = 0
-        cuts = [0, ]
-        for entry in entries:
-            if i > 0:
-                cuts.append(cuts[i-1] + int(entries[i]['in'])-int(entries[i-1]['out']))
-            i += 1
-        return cuts
-
-    def first_video_frame(self):
-        return int(self.entries_sorted()[0]['in'])
-
-    def profile(self):
-        for attr in self.session['children']:
-            if 'profile' in attr['name']:
-                return attr['attributes']
-
-    def fix_text(self, text):
-        try:
-            s = text.split(' ')
-            i = int(s[1])
-            s.insert(2, ':')
-            return ' '.join(s)
-        except:
-            return text
-
-    def markers(self, offset=0, from_first_marker=False):
-        """ by default return a dict of markers with timecodes relative to an origin
-
-            if from_first_marker=False: the origin is the first entry timecode
-            if from_first_marker=True: the origin is the first entry timecode before the first marker
-
-            offset: general origin offset
-        """
-
-        abs_time = 0
-        markers = []
-        i = 0
-        entries = self.entries_video_seconds()
-
-        for attr in self.session['children']:
-            if 'kdenlivedoc' in attr['name']:
-
-                for att in attr['children']:
-                    if 'markers' in att['name'] and 'children' in att.keys():
-
-                        for at in att['children']:
-                            if 'marker' in at['name']:
-
-                                marker_time = float(at['attributes']['time'].replace(',','.'))
-                                id = at['attributes']['id']
-                                rel_time = 0
-
-                                for entry in entries:
-                                    if marker_time >= entry['in'] and marker_time <= entry['out'] and id == entry['id']:
-                                        if i == 0 and from_first_marker:
-                                            abs_time = entry['t']
-                                        rel_time = entry['t'] + (marker_time - entry['in']) - abs_time + offset
-                                        break
-
-                                at['attributes']['time'] = rel_time
-                                at['attributes']['session_timecode'] = time.strftime('%H:%M:%S', time.gmtime(rel_time))
-                                at['attributes']['comment'] = self.fix_text(at['attributes']['comment'])
-                                markers.append(at['attributes'])
-
-                            i += 1
-        return markers
-
diff --git a/telemeta/util/old/crem_checker.py b/telemeta/util/old/crem_checker.py
deleted file mode 100755 (executable)
index 9b5088a..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-Vérifier que les nouvelles cotes d'item :
-
-- correspondent toutes à la collection décrite par le fichier .csv
-  (le fichier .csv est nommé d'après la nouvelle cote de collection)
-
-- sont uniques dans le fichiers .csv
-
-- ont un des formats suivant :
-    - soit CNRSMH_I_aaaa_nnn_mmm
-    - soit CNRSMH_I_aaaa_nnn_mmm_tt
-    - soit CNRSMH_I_aaaa_nnn_mmm_tt_pp
-    - soit CNRSMH_E_aaaa_nnn_mmm_tt
-    - soit CNRSMH_E_aaaa_nnn_mmm_tt_pp
-
-- correspondent à fichier .wav (et qu'il n'y a pas de fichiers .wav
-  supplémentaire)
-
-Vérifier que le répertoire est nommé d'apprès la nouvelle cote de collection
-
-Vérifier que la nouvelle cote de collection a l'un des formats suivant :
-    - soit CNRSMH_I_aaaa_nnn
-    - soit CNRSMH_E_aaaa_nnn_mmm
-
-Vérifier que les fichiers .wav sont lisibles, ont une durée et sont identifés
-comme WAV par audiolab.
-"""
-
-
-import os
-import re
-import sys
-import csv
-import xlrd
-import datetime
-import logging
-import shutil
-
-COLLECTION_OLD_PATTERN = [
-        { 'format': 'BM.aaa.nnn.mmm',           'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'BM.aaaa.nnn.mmm/pp',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaaa.nnn.mmm',          'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'BM.aaaa.nnn.mmm/',         'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/$'},
-        { 'format': 'BM.aaaa.nnn.mmm/ppp',      'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
-        { 'format': 'BM.aaaa.nnn.mm/pp',        'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{2})/[0-9]{2}$'},
-        { 'format': 'BM.aaaa.nnn',              'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})$'},
-        { 'format': 'BM.aaa.nnn.mmm/pp',        'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaa.nnn FANTOME',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3}) FANTOME$'},
-        { 'format': 'BM.aaa.nnn',               'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'BM.aaa.nnnBISoo/pp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})BIS([0-9]{2})/[0-9]{2}$'},
-        { 'format': 'BM.aaa.nnn.mmm.ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})\.[0-9]{3}$'},
-        { 'format': 'BM.aaa.nnn.mmm/ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
-        { 'format': 'BM.aaa.nnn/pp',            'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaa.nnn-BIS.ooo/pp',    'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})-BIS\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaaa.nnn.mmm/NN',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/NN$'},
-        { 'format': 'BM.aaa.nnn.mmm/pp-DEPOT',  'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}-DEPOT$'},
-        { 'format': 'BM.aaa.nnn.mmm-o>p',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]>[0-9]$'},
-        { 'format': 'CY.aaaa.nnn',              'regex': r'^(CY)\.([0-9]{4})\.([0-9]{3})$'},
-        { 'format': 'DI.aaaa.nnn.mmm',          'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'DI.aaaa.nnn.mmm/pp',       'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'DI.aaa.nnn.mmm',           'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'DI.aaa.nnn.mmm/pp',        'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'DI.aaa.nnn.mmm-o/p',       'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]/[0-9]$'},
-        { 'format': 'FANTOME 2*',               'regex': r'FANTOME 2\*$'},
-
-        ## yomguy
-        { 'format': 'BM.aaaa.nnn.mm',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
-        #{ 'format': 'BM.aaaa.nnn.mmm/pp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
-        #{ 'format': 'BM.aaaa.nnn.mmm/ppp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
-        #{ 'format': 'BM.aaaa.nnn.mmm:ii-jj',    'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3}):([0-9]{2})\-([0-9]{2})$'},
-        ]
-
-ITEM_NEW_PATTERN = [
-        { 'format': 'CNRSMH_I_aaaa_nnn_mmm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
-        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
-        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})_([0-9]{2})$'},
-        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
-        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2,3})_([0-9]{2})$'},
-
-        # yomguy
-        { 'format': 'CNRSMH_I_aaaa_nnn_mm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{2})$'},
-        ]
-
-COLLECTION_PATTERN = [
-        { 'format': 'CNRSMH_I_aaaa_nnn',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})$'},
-        { 'format': 'CNRSMH_E_aaaa_nnn_mmm',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
-        ]
-
-
-def check_name(patterns, name):
-    match = False
-    for pattern in patterns:
-        match = re.match(pattern['regex'], name)
-        if match:
-            break
-    return match
-
-
-class Logger:
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def write_info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def write_error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class CremCollection:
-
-    def __init__(self, dir, logger):
-        self.dir = dir
-        self.dir_name = self.dir.split(os.sep)[-1]
-        self.file_list = os.listdir(self.dir)
-        self.logger = logger
-
-    def xls_list(self):
-        file_list = []
-        for file in self.file_list:
-            filename = os.path.basename(file)
-            ext = os.path.splitext(file)[1]
-            if not '.' == filename[0] and (ext == '.xls' or ext == '.XLS'):
-                file_list.append(file)
-        print file_list
-        return file_list
-
-    def wav_list(self):
-        list = []
-        for file in self.file_list:
-            filename = os.path.basename(file)
-            ext = os.path.splitext(file)[1]
-            if not '.' == filename[0] and (ext == '.wav' or ext == '.WAV'):
-                list.append(file)
-            elif '.' == filename[0]:
-                self.logger.write_error(file, 'Warning : fichier caché présent !')
-        return list
-
-
-class CremCSV:
-
-    def __init__(self, file):
-        self.csv_file = open(file, 'w')
-        self.csv = csv.writer(self.csv_file,  delimiter=';')
-
-    def close(self):
-        self.csv_file.close()
-
-class CremXLS:
-
-    def __init__(self, file):
-        self.first_row = 8
-        self.original_col = 0
-        self.new_col = 1
-        self.book = xlrd.open_workbook(file)
-        self.sheet = self.book.sheet_by_index(0)
-        self.original_refs = self.original_refs()
-        self.new_refs = self.new_refs()
-        #print len(self.new_refs)
-        while True:
-            if len(self.original_refs) == 0 or len(self.new_refs) == 0:
-                break
-            else:
-                if not 'CNRS' in self.new_refs[0].encode('utf8') \
-                 and not  self.original_refs[0].encode('utf8') == '':
-                    self.original_refs = self.original_refs[1:]
-                    self.new_refs = self.new_refs[1:]
-                else:
-                    break
-
-        self.size = max(len(self.new_refs), len(self.original_refs))
-
-    def original_refs(self):
-        col = self.sheet.col(self.original_col)
-        list = []
-        for cell in col[self.first_row:]:
-            if cell.ctype == 1:
-                list.append(cell.value)
-        return list
-
-    def new_refs(self):
-        col = self.sheet.col(self.new_col)
-        list = []
-        for cell in col[self.first_row:]:
-            if cell.ctype == 1:
-                list.append(cell.value)
-        return list
-
-
-class CremItemFile:
-
-    def __init__(self):
-        self.media = ''
-
-    def set_media(self, media):
-        self.media = media
-
-    def properties(self):
-        self.frames = self.audio_file.get_nframes()
-        self.samplerate = self.audio_file.get_samplerate()
-        self.channels = self.audio_file.get_channels()
-        self.format = self.audio_file.get_file_format()
-        self.encoding = self.audio_file.get_encoding()
-
-
-class CremCheck:
-
-    def __init__(self, root_dir, log_file):
-        self.root_dir = root_dir
-        self.logger = Logger(log_file)
-        dir_list = os.listdir(self.root_dir)
-        list = []
-        for dir in dir_list:
-           if not dir[0] == '.':
-               list.append(dir)
-        self.dir_list = list
-
-    def check_new_refs(self):
-        for name in self.new_refs:
-            return check_name(ITEM_PATTERN, name)
-
-    def check(self):
-        for dir in self.dir_list:
-            collection = CremCollection(self.root_dir + dir, self.logger)
-            msg = '************************ ' + collection.dir_name + ' ******************************'
-            self.logger.write_info(collection.dir, msg[:70])
-
-            xls_list = collection.xls_list()
-            wav_list = collection.wav_list()
-
-            if not check_name(COLLECTION_PATTERN, dir):
-                self.logger.write_error(collection.dir, 'Le dossier de la collection est mal nommé -> SORTIE')
-            elif len(xls_list) == 0:
-                self.logger.write_error(collection.dir, 'PAS de fichier XLS dans le dossier collection -> SORTIE')
-            elif len(xls_list) > 1:
-                self.logger.write_error(collection.dir, 'Plusieurs fichiers XLS dans le dossier collection -> SORTIE')
-
-            else:
-                xls = CremXLS(self.root_dir + os.sep + dir + os.sep + xls_list[0])
-                self.logger.write_info(collection.dir, 'XLS : ' + xls_list[0] + ' - Feuille : ' + xls.sheet.name.encode('utf8'))
-                self.logger.write_info(collection.dir, 'Nombre d\'items détectés : ' + str(xls.size))
-                csv_file = CremCSV(self.root_dir + dir + os.sep + collection.dir_name + '.csv')
-
-                if len(wav_list) != xls.size:
-                    self.logger.write_error(collection.dir, \
-                    'Le nombre de références du fichier XLS (' + str(xls.size) + ') diffère du nombre de fichiers (' + str(len(wav_list)) + ')')
-
-                temp_list = []
-                item_file = CremItemFile()
-
-                for i in range(0,xls.size):
-                    error = False
-
-                    try:
-                        item_old = xls.original_refs[i]
-                        #self.logger.write_error(collection.dir, item_old)
-                    except:
-                        item_old = ''
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item est inexistante'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-                        continue
-
-                    try:
-                        item = xls.new_refs[i]
-                        #self.logger.write_error(collection.dir, item)
-                    except:
-                        item = ''
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item est inexistante'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-                        continue
-
-                    if not item in temp_list:
-                        temp_list.append(item)
-                    else:
-                        msg =  'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' est multiple'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-
-                    #if not check_name(ITEM_OLD_PATTERN, item_old):
-                        #msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item ' + item_old.encode('utf8') + ' est mal formatée'
-                        #self.logger.write_error(collection.dir, msg)
-
-                    if not check_name(ITEM_NEW_PATTERN, item):
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item ' + item.encode('utf8') + ' est mal formatée'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-
-                    if not collection.dir_name in item:
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' ne correspond pas à celle de la collection'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-
-                    name_wav = item.encode('utf8') + '.wav'
-                    if not name_wav in wav_list:
-                        self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'existe pas')
-                    else:
-                        item_file.set_media(collection.dir + os.sep + name_wav)
-                        #if not item_file.is_wav():
-                        #    self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'est pas valide')
-                        #    error = True
-
-                    if not error:
-                        csv_file.csv.writerow([xls.original_refs[i], xls.new_refs[i]])
-
-                csv_file.close()
-
-                for filename in wav_list:
-                    if not check_name(ITEM_NEW_PATTERN, os.path.splitext(filename)[0]):
-                        self.logger.write_error(collection.dir, 'Le nom du fichier ' + str(os.path.splitext(filename)[0]) + ' est mal formaté')
-
-            msg = '********************************************************************************'
-            self.logger.write_info(collection.dir, msg[:70])
-
-
-def main():
-    log_file = sys.argv[-1]
-    root_dir = sys.argv[-2]
-    log_tmp = log_file+'.tmp'
-
-    c = CremCheck(root_dir, log_tmp)
-    c.check()
-
-    date = datetime.datetime.now().strftime("%x-%X").replace('/','_')
-    shutil.copy(log_tmp,log_file+'-'+date+'.log')
-    shutil.move(log_tmp,log_file)
-
-if __name__ == '__main__':
-    main()
-
diff --git a/telemeta/util/old/process-waveform-cgi.py b/telemeta/util/old/process-waveform-cgi.py
deleted file mode 100755 (executable)
index 317878b..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2009-2010 Guillaume Pellerin <yomguy@parisson.com>
-
-# This file is part of TimeSide.
-
-# TimeSide is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-
-# TimeSide is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
-
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-
-# for python2.5
-
-version = '0.5'
-
-
-import os
-import sys
-import time
-import shutil
-import datetime
-import timeside
-
-# soon with python2.6
-#from multiprocessing import Process
-
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-import cgi
-fs = cgi.FieldStorage()
-
-
-orig_media_dir = '/mnt/awdiomusic/musicbase'
-project_dir = '/mnt/awdio'
-log_file = project_dir + '/logs/process.log'
-sys.path.append('/home/awdio/apps/telemeta-awdio')
-
-
-class GrapherScheme:
-    """Configuration holder for the waveform/spectrogram grapher:
-    color scheme, image geometry, overwrite policy and thread count."""
-
-    def __init__(self):
-        self.color = 255
-        # NOTE(review): this is Python 2, so 58/4 etc. below are INTEGER
-        # divisions - presumably intentional channel dimming, but confirm.
-        self.color_scheme = {
-            'waveform': [ # Four (R,G,B) tuples for three main color channels for the spectral centroid method
-                        (self.color,self.color,self.color)
-#                        (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,0,0)
-                        ],
-            'spectrogram': [
-                        (0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100), (224,224,44), (255,60,30), (255,255,255)
-                        ]}
-
-        # Grapher id
-        self.id = 'waveform_awdio'
-
-        # Width of the image
-        self.width = 1800
-
-        # Height of the image
-        self.height = 233
-
-        # Background color
-        self.bg_color = None
-
-        # Force computation. By default, the class doesn't overwrite existing image files.
-        self.force = False
-        
-        # Nb of threads
-        # FIXME: memory leak for > 1 !
-        self.threads = 1
-
-      
-class TelemetaPreprocessImport(object):
-    """CGI pre-processing importer.
-
-    Copies a wav (named via the CGI 'file' field) from the original media
-    tree into the Telemeta items directory, renders its waveform image,
-    runs all registered TimeSide analyzers, caches the analysis XML and
-    registers the item in the 'awdio' collection if missing.
-    """
-
-    def __init__(self, root_dir, dest_dir, log_file):
-        # NOTE(review): the lines of this method mix tab- and space-based
-        # indentation (rendered at different widths); it only parses with
-        # tabs expanded to 8 columns - confirm before reformatting.
-       from telemeta.cache import TelemetaCache as Cache
-       from telemeta.util.logger import Logger
-       self.media_item_dir = 'items'
-        self.root_dir = root_dir + 'items'
-        self.dest_dir = dest_dir
-        self.threads = 1
-        self.logger = Logger(log_file)
-        self.counter = 0
-        self.force = 0
-        self.cache = Cache(self.dest_dir)
-
-        # NOTE(review): threads/logger/counter/force are re-assigned below,
-        # overwriting the values set above - the first batch looks redundant.
-        self.scheme = GrapherScheme()
-        self.width = self.scheme.width
-        self.height = self.scheme.height
-        self.bg_color = self.scheme.bg_color
-        self.color_scheme = self.scheme.color_scheme
-        self.force = self.scheme.force
-        self.threads = self.scheme.threads
-        self.logger = Logger(log_file)
-        self.counter = 0
-        self.collection_name = 'awdio'
-        self.collection = self.set_collection(self.collection_name)
-        
-        self.analyzers = timeside.core.processors(timeside.api.IAnalyzer)
-        self.grapher = timeside.grapher.WaveformAwdio(width=self.width, 
-                                                         height=self.height, 
-                                                         bg_color=self.bg_color, 
-                                                         color_scheme=self.color_scheme)
-        
-
-    def set_collection(self, collection_name):
-        # Return the MediaCollection with this code, creating it on demand.
-        import telemeta.models
-        collections = telemeta.models.media.MediaCollection.objects.filter(code=collection_name)
-        if not collections:
-            c = telemeta.models.media.MediaCollection(code=collection_name)
-            c.title = collection_name
-            c.save()
-            msg = 'added'
-            # NOTE(review): logging.Logger.info(msg, *args) treats the 2nd
-            # argument as a %-format arg; passing (name, msg) like this drops
-            # the message unless the name contains a format spec - confirm.
-            self.logger.logger.info(collection_name, msg)
-            collection = c
-        else:
-            collection = collections[0]
-        return collection
-
-    def process(self):
-        # CGI handler: read the 'file' field, ensure the wav is present in
-        # the items dir, then render/analyze it unless already cached.
-       import telemeta.models
-       keys = fs.keys()
-       if keys[0] == 'file':
-           filename = fs['file'].value
-           media_orig = orig_media_dir + os.sep + filename
-           media = self.root_dir + os.sep + filename
-           
-           if not os.path.exists(media):
-               shutil.copy(media_orig, media)
-               # NOTE(review): filename comes from the CGI request and is
-               # interpolated into a shell command - shell-injection risk.
-               os.system('chmod 644 ' + media)
-            
-            name, ext = os.path.splitext(filename)
-            size = str(self.width) + '_' + str(self.height)
-            image_name = name + '.' + self.scheme.id + '.' + size + '.png'
-            image = self.dest_dir + os.sep + image_name
-            xml = name + '.xml'
-            
-            # Skip work when both the waveform PNG and analysis XML exist.
-            if not self.cache.exists(image_name) or not self.cache.exists(xml):
-                mess = 'Processing ' + media
-                self.logger.logger.info(mess)
-           
-               print "Content-type: text/plain\n"
-               print mess
-               # Build one TimeSide pipe: decoder | grapher | all analyzers.
-               decoder  = timeside.decoder.FileDecoder(media)
-               pipe = decoder | self.grapher
-               analyzers = []
-               analyzers_sub = []
-               for analyzer in self.analyzers:
-                   subpipe = analyzer()
-                   analyzers_sub.append(subpipe)
-                   pipe = pipe | subpipe
-               pipe.run()
-               
-               mess = 'Rendering ' + image
-               self.logger.logger.info(mess)
-               self.grapher.render(output=image)
-               
-               mess = 'Frames / Pixel = ' + str(self.grapher.graph.samples_per_pixel)
-               self.logger.logger.info(mess)
-               
-               # Collect each analyzer result; 'duration' is converted from
-               # seconds to a timedelta for readability.
-               for analyzer in analyzers_sub:
-                   value = analyzer.result()
-                   if analyzer.id() == 'duration':
-                       value = datetime.timedelta(0,value)
-                   analyzers.append({'name':analyzer.name(),
-                           'id':analyzer.id(),
-                           'unit':analyzer.unit(),
-                           'value':str(value)})
-               
-               self.cache.write_analyzer_xml(analyzers, xml)
-               
-               item = telemeta.models.media.MediaItem.objects.filter(code=name)
-                           
-               if not item:
-                   item = telemeta.models.media.MediaItem(collection=self.collection, code=name)
-                   item.title = name
-                   item.file = self.media_item_dir + os.sep + filename
-                   item.save()
-                   msg = 'added item : ' + filename
-                   self.logger.logger.info(self.collection_name, msg)
-
-               # Drop references to the pipe so GStreamer resources free up.
-               pipe = 0
-               decoder = 0
-               
-               print "OK"
-               
-               #except:
-                   #pipe = 0
-                   #decoder = 0
-                   #mess = 'Could NOT process : ' + media
-                   #self.logger.logger.error(mess)
-                   #print mess
-                   
-           else:
-               mess = "Nothing to do with file : " + media
-               self.logger.logger.info(mess)
-               print "Content-type: text/plain\n"
-               print mess
-       
-       else:
-           print "Content-type: text/plain\n"
-           print "No file given !"
-       
-       
-
-if __name__ == '__main__':
-    # Bootstrap Django the pre-1.4 way, then run the CGI import once.
-    sys.path.append(project_dir)
-    import settings
-    setup_environ(settings)
-    media_dir = settings.MEDIA_ROOT
-    data_dir = settings.TELEMETA_DATA_CACHE_DIR
-    t = TelemetaPreprocessImport(media_dir, data_dir, log_file)
-    t.process()
diff --git a/telemeta/util/old/telemeta-backup.py b/telemeta/util/old/telemeta-backup.py
deleted file mode 100755 (executable)
index 0a31499..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Samalyse SARL
-
-# This software is a computer program whose purpose is to backup, analyse,
-# transcode and stream any audio content with its metadata over a web frontend.
-
-# This software is governed by the CeCILL  license under French law and
-# abiding by the rules of distribution of free software.  You can  use,
-# modify and/ or redistribute the software under the terms of the CeCILL
-# license as circulated by CEA, CNRS and INRIA at the following URL
-# "http://www.cecill.info".
-
-# As a counterpart to the access to the source code and  rights to copy,
-# modify and redistribute granted by the license, users are provided only
-# with a limited warranty  and the software's author,  the holder of the
-# economic rights,  and the successive licensors  have only  limited
-# liability.
-
-# In this respect, the user's attention is drawn to the risks associated
-# with loading,  using,  modifying and/or developing or reproducing the
-# software by the user in light of its specific status of free software,
-# that may mean  that it is complicated to manipulate,  and  that  also
-# therefore means  that it is reserved for developers  and  experienced
-# professionals having in-depth computer knowledge. Users are therefore
-# encouraged to load and test the software's suitability as regards their
-# requirements in conditions enabling the security of their systems and/or
-# data to be ensured and,  more generally, to use and operate it in the
-# same conditions as regards security.
-
-# The fact that you are presently reading this means that you have had
-# knowledge of the CeCILL license and that you accept its terms.
-#
-# Author: Olivier Guilyardi <olivier@samalyse.com>
-
-import os
-import sys
-import time
-from django.core.management import setup_environ
-
-def print_usage(toolname):
-    # Print CLI usage for the backup tool (Python 2 print statements).
-    print "Usage: " + toolname + " <project_dir> <backup_dir>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  backup_dir: the destination backup folder (must exist)"
-
-def write_readme(dest_dir, coll_num):
-    # Write a small README in dest_dir recording the backup date (UTC)
-    # and the number of collections backed up.
-    readme = open(dest_dir + "/" + "README", "w")
-    timestr = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
-    readme.write("Telemeta Backup\n\n")
-    readme.write("- date: " + timestr + "\n")
-    readme.write("- number of collections: " + str(coll_num) + "\n\n")
-    readme.close()
-
-def backup(dest_dir):
-    """Serialize every MediaCollection under dest_dir, batched into
-    collections-<a>-<b> subfolders of 100 collections each."""
-    from telemeta.models import MediaCollection
-    from telemeta.backup import CollectionSerializer
-
-    collections = MediaCollection.objects.order_by('id')
-    count = collections.count()
-
-    print "Writing README file..",
-    write_readme(dest_dir, count)
-    print "Done."
-
-    i = 0
-    for collection in collections:
-        # Open a new batch directory every 100 collections.
-        if i % 100 == 0:
-            set_dir = dest_dir + ("/collections-%d-%d" % (i+1, i+100))
-            os.mkdir(set_dir)
-        i += 1
-        # Trailing comma = Python 2 "print without newline" progress line.
-        print "Processing collection %d/%d (%d%%) with id: %s.. " \
-            % (i, count, i*100/count, collection.id),
-        sys.stdout.flush()
-        serializer = CollectionSerializer(collection)
-        serializer.backup(set_dir)
-        print "Done"
-
-def run():
-    # Expect exactly two arguments: <project_dir> <backup_dir>.
-    if len(sys.argv) != 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[1]
-        backup_dir = sys.argv[2]
-        # Bootstrap Django settings from the target project (pre-1.4 style).
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        backup(backup_dir)
-
-if __name__ == '__main__':
-    run()
diff --git a/telemeta/util/old/telemeta-crem-import-alt_ids.py b/telemeta/util/old/telemeta-crem-import-alt_ids.py
deleted file mode 100755 (executable)
index 84c673d..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2011 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import sys
-import xlrd
-import logging
-import datetime
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
-class Logger:
-    """Thin wrapper around the stdlib logging module writing INFO-level
-    records to a single file handler."""
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def write_info(self, prefix, message):
-        # message is expected to be UTF-8 encoded bytes (Python 2).
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def write_error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class TelemetaAltIdsImport:
-    """Import collection alt_ids from an Excel sheet: column 0 holds the
-    collection old_code, columns 1..n hold alternative ids which get
-    space-joined into MediaCollection.alt_ids."""
-
-    def __init__(self, xls_file, log_file):
-        self.logger = Logger(log_file)
-        self.xls = xls_file
-        self.row = 0
-
-    def alt_ids_import(self):
-        from telemeta.models import MediaCollection
-        self.book = xlrd.open_workbook(self.xls)
-        self.sheet = self.book.sheet_by_index(0)
-        # Number of data rows (first row presumably a header - confirm).
-        self.length = len(self.sheet.col(0))-1
-        
-        while True:
-            ids = []
-            self.row += 1
-            row = self.sheet.row(self.row)
-            if self.row == self.length:
-                break
-            collection_id = row[0].value
-            cell_alt_id = row[1]
-            # ctype == 1 means the xlrd cell holds text.
-            if cell_alt_id.ctype == 1:
-                for i in range(1,len(row)):
-                    cell_alt_id = row[i]
-                    if cell_alt_id.ctype == 1:
-                        ids.append(cell_alt_id.value)
-                alt_ids = ' '.join(ids)
-                # NOTE(review): bare except swallows every failure (not just
-                # DoesNotExist) and logs it as "no collection found".
-                try:
-                    collection = MediaCollection.objects.get(old_code=collection_id)
-                    collection.alt_ids = alt_ids
-                    collection.save()
-                    print self.row, collection_id, alt_ids
-                except:
-                    msg = 'No collection found for this id'
-                    self.logger.write_error(collection_id, msg)
-                    continue
-            
-                
-def print_usage(tool_name):
-    # Print CLI usage for the alt_ids importer.
-    print "Usage: "+tool_name+" <project_dir> <xls_file> <log_file>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  xls_file: the excel file containing all collection alt_ids"
-
-def run():
-    # Args read from the END of argv: <project_dir> <xls_file> <log_file>.
-    if len(sys.argv) < 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[-3]
-        xls_file = sys.argv[-2]
-        log_file = sys.argv[-1]
-        # Bootstrap Django settings from the target project (pre-1.4 style).
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        t = TelemetaAltIdsImport(xls_file, log_file)
-        t.alt_ids_import()
-
-if __name__ == '__main__':
-    run()
diff --git a/telemeta/util/old/telemeta-crem-import-test.py b/telemeta/util/old/telemeta-crem-import-test.py
deleted file mode 100755 (executable)
index 021e9a2..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2010 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import sys
-import csv
-import logging
-import datetime
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
-
-class Logger:
-    """File logger wrapper; info/error take a (prefix, message) pair where
-    message is UTF-8 encoded bytes (Python 2)."""
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class TelemetaWavImport:
-    """TEST variant of the CREM wav importer: walks source_dir, creating or
-    selecting MediaCollections per subdirectory matching `pattern`, then
-    matches wav files to MediaItems (by code, or old_code via an optional
-    per-collection CSV mapping). In this variant the actual audio file
-    copy in write_file is commented out - only save()/set_revision run."""
-
-    def __init__(self, source_dir, log_file, pattern, domain):
-        from django.contrib.auth.models import User
-        self.logger = Logger(log_file)
-        self.source_dir = source_dir
-        self.collections = os.listdir(self.source_dir)
-        self.pattern = pattern
-        # NOTE(review): IndexError here if no 'admin' user exists - confirm.
-        self.user = User.objects.filter(username='admin')[0]
-        self.domain = domain
-
-    def write_file(self, item, wav_file, overwrite=False):
-        # Attach wav_file to the item (dry-run: the copy is commented out),
-        # then record a revision; log an error if the file is missing or
-        # the item already has one and overwrite is False.
-        filename = wav_file.split(os.sep)[-1]
-        if os.path.exists(wav_file):
-            if not item.file or overwrite:
-#                f = open(wav_file, 'r')
-#                file_content = ContentFile(f.read())
-#                item.file.save(filename, file_content)
-#                f.close()
-                item.save()
-                item.set_revision(self.user)
-            else:
-                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
-                self.logger.error('item', msg)
-        else:
-            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
-            self.logger.error('item', msg)
-            
-    def wav_import(self):
-        from telemeta.models import MediaItem,  MediaCollection
-        
-        # Pass 1: ensure a MediaCollection exists for every matching
-        # subdirectory (abort if a CSV-mapped collection is unknown).
-        collections = []
-        for collection in self.collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_files = os.listdir(collection_dir)
-            
-            
-            if not '/.' in collection_dir and self.pattern in collection_dir:
-                collection_name = collection.split(os.sep)[-1]
-                collections.append(collection_name)
-                c = MediaCollection.objects.filter(code=collection_name)
-                
-                if not c and collection + '.csv' in collection_files:
-                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
-                    self.logger.error(collection, msg)
-                    sys.exit(msg)
-                elif not c:
-                    msg = 'collection NON présente dans la base de données, CREATION '
-                    self.logger.info(collection, msg)
-                    c = MediaCollection(code=collection_name)
-                    c.save()
-                    c.set_revision(self.user)
-                else:
-                    msg = 'collection présente dans la base de données, SELECTION'
-                    self.logger.info(collection, msg)
-                    
-        # Pass 2: per collection, read the optional CSV (old_code -> code
-        # mapping, ';'-separated, new code in column 1) and import wavs.
-        for collection in collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_name = collection
-            collection_files = os.listdir(collection_dir)
-            msg = '************************ ' + collection + ' ******************************'
-            self.logger.info(collection, msg[:70])
-            overwrite = True
-            csv_file = ''
-            rows = {}
-            
-            if collection + '.csv' in collection_files:
-                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
-                csv_data = csv.reader(open(csv_file), delimiter=';')
-                for row in csv_data:
-                    # Map new item code (col 1) -> old reference (col 0).
-                    rows[row[1].strip()] = row[0].strip()
-                msg = collection + ' import du fichier CSV de la collection'
-                self.logger.info(collection, msg[:70])
-            else:
-                msg = collection + ' pas de fichier CSV dans la collection'
-                self.logger.info(collection, msg[:70])
-            
-            c = MediaCollection.objects.filter(code=collection_name)
-            if not c:
-                c = MediaCollection(code=collection_name)
-                c.save()
-                msg = ' collection NON présente dans la BDD, CREATION '
-                self.logger.info(c.code, msg)
-            else:
-                c = c[0]
-                msg = ' id = '+str(c.id)
-                self.logger.info(c.code, msg)
-            
-            # Collect wav files (extension match only in this variant).
-            audio_files = []
-            for file in collection_files:
-                ext = ['WAV', 'wav']
-                if file.split('.')[-1] in ext:
-                    audio_files.append(file)
-            
-            audio_files.sort()
-            nb_items = c.items.count()
-            counter = 0
-            
-            for file in audio_files:
-                code = file.split('.')[0]
-                wav_file = self.source_dir + os.sep + collection + os.sep + file
-                
-                # Cas 1/2: as many (or more) items as files - match by code,
-                # falling back to old_code via the CSV mapping.
-                if len(audio_files) <= nb_items:
-                    items = MediaItem.objects.filter(code=code)
-                    
-                    old_ref = ''
-                    if code in rows and not items:
-                        old_ref = rows[code]
-                        items = MediaItem.objects.filter(old_code=old_ref)
-                        
-                    if items:
-                        item = items[0]
-                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
-                        self.logger.info('item', msg)
-                        item.code = code
-                        item.save()
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-                    
-                    self.write_file(item, wav_file, overwrite)
-                    
-                # Cas 3a: one item but several files - keep the first,
-                # create items for the rest.
-                elif nb_items == 1 and len(audio_files) > 1:
-                    if counter == 0:
-                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
-                        self.logger.info('item', msg)
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-                        self.write_file(item, wav_file, overwrite)
-                
-                # Cas 3b: ambiguous (several items, more files) - no creation.
-                elif nb_items > 1 and nb_items < len(audio_files):
-                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
-                    self.logger.info('item', msg)
-
-                counter += 1
-        
-        # Summary: log the public URL of every imported collection.
-        msg = 'Liste des URLs des collections importées :'
-        self.logger.info('INFO', msg)
-        for collection in collections:
-            msg = 'http://'+self.domain+'/collections/'+collection
-            self.logger.info(collection, msg)
-            
-        
-def print_usage(tool_name):
-    # Print CLI usage for the wav importer (test variant).
-    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  source_dir: the directory containing the wav files to include"
-    print "  pattern: a pattern to match the collection names"
-    print "  log_file: a log file to write logs"
-    print "  domain: root domain for collections"
-
-def run():
-    # Args read from the END of argv:
-    # <project_dir> <source_dir> <pattern> <log_file> <domain>.
-    # NOTE(review): the guard checks for >= 3 args but 5 are consumed.
-    if len(sys.argv) < 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[-5]
-        source_dir = sys.argv[-4]
-        pattern = sys.argv[-3]
-        log_file = sys.argv[-2]
-        url = sys.argv[-1]
-        # Bootstrap Django settings from the target project (pre-1.4 style).
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        t = TelemetaWavImport(source_dir, log_file, pattern, url)
-        t.wav_import()
-
-if __name__ == '__main__':
-    run()
diff --git a/telemeta/util/old/telemeta-crem-import.py b/telemeta/util/old/telemeta-crem-import.py
deleted file mode 100755 (executable)
index dcdf5c1..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2010 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import sys
-import csv
-import logging
-import datetime
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
-
-class Logger:
-    """File logger wrapper; info/error take a (prefix, message) pair where
-    message is UTF-8 encoded bytes (Python 2)."""
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class TelemetaWavImport:
-    """Live CREM wav importer: walks source_dir, creating or selecting a
-    MediaCollection per subdirectory matching `pattern`, then matches wav
-    files to MediaItems (by code, or old_code via an optional CSV mapping)
-    and stores the audio through Django's file storage."""
-
-    def __init__(self, source_dir, log_file, pattern, domain):
-        from django.contrib.auth.models import User
-        self.logger = Logger(log_file)
-        self.source_dir = source_dir
-        self.collections = os.listdir(self.source_dir)
-        self.pattern = pattern
-        # NOTE(review): IndexError here if no 'admin' user exists - confirm.
-        self.user = User.objects.filter(username='admin')[0]
-        self.domain = domain
-
-    def write_file(self, item, wav_file, overwrite=False):
-        # Copy wav_file into the item's FileField storage and record a
-        # revision; log an error if the file is missing or the item already
-        # has one and overwrite is False.
-        # NOTE(review): reads the whole wav into memory via ContentFile.
-        filename = wav_file.split(os.sep)[-1]
-        if os.path.exists(wav_file):
-            if not item.file or overwrite:
-                f = open(wav_file, 'r')
-                file_content = ContentFile(f.read())
-                item.file.save(filename, file_content)
-                f.close()
-                item.save()
-                item.set_revision(self.user)
-            else:
-                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
-                self.logger.error('item', msg)
-        else:
-            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
-            self.logger.error('item', msg)
-
-    def wav_import(self):
-        from telemeta.models import MediaItem,  MediaCollection
-
-        # Pass 1: ensure a MediaCollection exists for every matching
-        # subdirectory (abort if a CSV-mapped collection is unknown).
-        collections = []
-        for collection in self.collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_files = os.listdir(collection_dir)
-
-
-            if not '/.' in collection_dir and self.pattern in collection_dir:
-                collection_name = collection.split(os.sep)[-1]
-                collections.append(collection_name)
-                c = MediaCollection.objects.filter(code=collection_name)
-
-                if not c and collection + '.csv' in collection_files:
-                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
-                    self.logger.error(collection, msg)
-                    sys.exit(msg)
-                elif not c:
-                    msg = 'collection NON présente dans la base de données, CREATION '
-                    self.logger.info(collection, msg)
-                    c = MediaCollection(code=collection_name, title=collection_name)
-                    c.save()
-                    c.set_revision(self.user)
-                else:
-                    msg = 'collection présente dans la base de données, SELECTION'
-                    self.logger.info(collection, msg)
-
-        # Pass 2: per collection, read the optional CSV (old_code -> code
-        # mapping, ';'-separated, new code in column 1) and import wavs.
-        for collection in collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_name = collection
-            collection_files = os.listdir(collection_dir)
-            msg = '************************ ' + collection + ' ******************************'
-            self.logger.info(collection, msg[:70])
-            overwrite = True
-            csv_file = ''
-            rows = {}
-
-            if collection + '.csv' in collection_files:
-                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
-                csv_data = csv.reader(open(csv_file), delimiter=';')
-                for row in csv_data:
-                    # Map new item code (col 1) -> old reference (col 0).
-                    rows[row[1].strip()] = row[0].strip()
-                msg = collection + ' import du fichier CSV de la collection'
-                self.logger.info(collection, msg[:70])
-            else:
-                msg = collection + ' pas de fichier CSV dans la collection'
-                self.logger.info(collection, msg[:70])
-
-            c = MediaCollection.objects.filter(code=collection_name)
-            if not c:
-                c = MediaCollection(code=collection_name)
-                c.save()
-                msg = ' collection NON présente dans la BDD, CREATION '
-                self.logger.info(c.code, msg)
-            else:
-                c = c[0]
-                msg = ' id = '+str(c.id)
-                self.logger.info(c.code, msg)
-
-            # Collect wav files, skipping dot-files (unlike the test variant).
-            audio_files = []
-            for file in collection_files:
-                ext = ['WAV', 'wav']
-                if file.split('.')[-1] in ext and file[0] != '.':
-                    audio_files.append(file)
-
-            audio_files.sort()
-            nb_items = c.items.count()
-            counter = 0
-
-            for file in audio_files:
-                code = file.split('.')[0]
-                wav_file = self.source_dir + os.sep + collection + os.sep + file
-
-                # Cas 1/2: as many (or more) items as files - match by code,
-                # falling back to old_code via the CSV mapping.
-                if len(audio_files) <= nb_items:
-                    items = MediaItem.objects.filter(code=code)
-
-                    old_ref = ''
-                    if code in rows and not items:
-                        old_ref = rows[code]
-                        items = MediaItem.objects.filter(old_code=old_ref)
-
-                    if items:
-                        item = items[0]
-                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
-                        self.logger.info('item', msg)
-                        item.code = code
-                        item.save()
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-
-                    self.write_file(item, wav_file, overwrite)
-
-                # Cas 3a: one item but several files - keep the first,
-                # create items for the rest.
-                elif nb_items == 1 and len(audio_files) > 1:
-                    if counter == 0:
-                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
-                        self.logger.info('item', msg)
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-                        self.write_file(item, wav_file, overwrite)
-
-                # Cas 3b: ambiguous (several items, more files) - no creation.
-                elif nb_items > 1 and nb_items < len(audio_files):
-                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
-                    self.logger.info('item', msg)
-
-                counter += 1
-
-        # Summary: log the public URL of every imported collection.
-        msg = 'Liste des URLs des collections importées :'
-        self.logger.info('INFO', msg)
-        for collection in collections:
-            msg = 'http://'+self.domain+'/archives/collections/'+collection
-            self.logger.info(collection, msg)
-
-
-def print_usage(tool_name):
-    # Print CLI usage for the wav importer (live variant).
-    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  source_dir: the directory containing the wav files to include"
-    print "  pattern: a pattern to match the collection names"
-    print "  log_file: a log file to write logs"
-    print "  domain: root domain for collections"
-
-def run():
-    # Args read from the END of argv:
-    # <project_dir> <source_dir> <pattern> <log_file> <domain>.
-    # NOTE(review): the guard checks for >= 3 args but 5 are consumed.
-    if len(sys.argv) < 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[-5]
-        source_dir = sys.argv[-4]
-        pattern = sys.argv[-3]
-        log_file = sys.argv[-2]
-        url = sys.argv[-1]
-        # Bootstrap Django settings from the target project (pre-1.4 style).
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        t = TelemetaWavImport(source_dir, log_file, pattern, url)
-        t.wav_import()
-
-if __name__ == '__main__':
-    run()
diff --git a/telemeta/util/old/telemeta-media-link.py b/telemeta/util/old/telemeta-media-link.py
deleted file mode 100755 (executable)
index 118fe95..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2010 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import re
-import sys
-import logging
-import datetime
-import timeside
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
-mapping = {
-             'title': 'title',
-             'album': 'collection',
-             'date': 'recorded_from_date',
-             'artist': 'author',
-             'track-number': 'track',
-             'encoder': 'comment',
-             'genre': 'generic_style',
-             'audio-codec': 'comment',
-             'container-format': 'comment',
-             }
-
-class Logger:
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def write_info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def write_error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class TelemetaMediaImport:
-
-    def __init__(self, media_dir, log_file):
-        self.logger = Logger(log_file)
-        self.media_dir = media_dir
-        self.medias = self.get_medias()
-    
-    def get_medias(self):
-        os.chdir(self.media_dir)
-        file_list = []
-        for root, dirs, files in os.walk('.'):
-            for file in files:
-                path = root + os.sep + file
-                if not os.sep+'.' in path:
-                    file_list.append({'root': root, 'file': file})
-        return file_list
-        
-    def set_collection(self, collection_name):
-        if not collection_name:
-            collection_name = 'Unkown'
-        code = collection_name.replace(' ','_')
-        code = re.sub(r'\W+', '_', code)
-        from telemeta.models.media import MediaCollection
-        collections = MediaCollection.objects.filter(code=code)
-        if not collections:
-            collection = MediaCollection(code=code,title=collection_name)
-            collection.save()
-            msg = 'created'
-            self.logger.write_info('collection ' + collection_name, msg)
-        else:
-            collection = collections[0]
-        return collection
-        
-    def media_import(self):
-        from telemeta.models.media import MediaItem
-        for media in self.medias:
-            path = media['root'] + os.sep + media['file']
-            print 'checking ' + path
-            filename,  ext = os.path.splitext(media['file'])
-            item = MediaItem.objects.filter(code=filename)
-            if not item:
-                print 'importing ' + path
-                decoder = timeside.decoder.FileDecoder(path)
-                try:
-                    metadata = decoder.metadata()
-                    print metadata
-                    collection = self.set_collection(metadata['album'])
-                    item = MediaItem(collection=collection)
-                    item.code = re.sub(r'\W+', '_', metadata['title'])
-                    for tag in mapping.keys():
-                        try:
-                            if tag == 'date':
-                                date = metadata[tag].split(' ')[1].split('/')
-                                metadata[tag] = date[2]+'-'+date[1]+'-'+date[0]    
-                            if mapping[tag] == 'comment':
-                                item[mapping[tag]] = item[mapping[tag]] + '\n' + metadata[tag]
-                            else:
-                                item[mapping[tag]] = metadata[tag]
-                        except:
-                            continue
-                    item.file = path
-                    item.save()
-                    msg = 'added item : ' + path
-                    self.logger.write_info(collection.code, msg)
-                except:
-                    continue
-                
-
-def run():
-    project_dir = sys.argv[-2]
-    log_file = sys.argv[-1]
-    sys.path.append(project_dir)
-    import settings
-    setup_environ(settings)
-    media_dir = settings.MEDIA_ROOT
-    t = TelemetaMediaImport(media_dir, log_file)
-    t.media_import()
-
-if __name__ == '__main__':
-    run()
diff --git a/telemeta/util/sql/convert_myisam_to_innodb.sql b/telemeta/util/sql/convert_myisam_to_innodb.sql
deleted file mode 100644 (file)
index 91e36d5..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-ALTER TABLE `vernacular_styles` ENGINE=InnoDB;
-ALTER TABLE `users` ENGINE=InnoDB;
-ALTER TABLE `thumbnail_kvstore` ENGINE=InnoDB;
-ALTER TABLE `telemeta_media_transcoded` ENGINE=InnoDB;
-ALTER TABLE `tape_width` ENGINE=InnoDB;
-ALTER TABLE `tape_wheel_diameter` ENGINE=InnoDB;
-ALTER TABLE `tape_vendor` ENGINE=InnoDB;
-ALTER TABLE `tape_speed` ENGINE=InnoDB;
-ALTER TABLE `tape_length` ENGINE=InnoDB;
-ALTER TABLE `south_migrationhistory` ENGINE=InnoDB;
-ALTER TABLE `search_criteria` ENGINE=InnoDB;
-ALTER TABLE `searches_criteria` ENGINE=InnoDB;
-ALTER TABLE `searches` ENGINE=InnoDB;
-ALTER TABLE `rights` ENGINE=InnoDB;
-ALTER TABLE `revisions` ENGINE=InnoDB;
-ALTER TABLE `recording_contexts` ENGINE=InnoDB;
-ALTER TABLE `publishing_status` ENGINE=InnoDB;
-ALTER TABLE `publisher_collections` ENGINE=InnoDB;
-ALTER TABLE `publishers` ENGINE=InnoDB;
-ALTER TABLE `profiles` ENGINE=InnoDB;
-ALTER TABLE `playlist_resources` ENGINE=InnoDB;
-ALTER TABLE `playlists` ENGINE=InnoDB;
-ALTER TABLE `physical_formats` ENGINE=InnoDB;
-ALTER TABLE `original_format` ENGINE=InnoDB;
-ALTER TABLE `original_channel_number` ENGINE=InnoDB;
-ALTER TABLE `organization` ENGINE=InnoDB;
-ALTER TABLE `metadata_writers` ENGINE=InnoDB;
-ALTER TABLE `metadata_authors` ENGINE=InnoDB;
-ALTER TABLE `media_type` ENGINE=InnoDB;
-ALTER TABLE `media_transcoding` ENGINE=InnoDB;
-ALTER TABLE `media_status` ENGINE=InnoDB;
-ALTER TABLE `media_parts` ENGINE=InnoDB;
-ALTER TABLE `media_markers` ENGINE=InnoDB;
-ALTER TABLE `media_item_related` ENGINE=InnoDB;
-ALTER TABLE `media_item_performances` ENGINE=InnoDB;
-ALTER TABLE `media_item_keywords` ENGINE=InnoDB;
-ALTER TABLE `media_item_identifier` ENGINE=InnoDB;
-ALTER TABLE `media_items` ENGINE=InnoDB;
-ALTER TABLE `media_formats` ENGINE=InnoDB;
-ALTER TABLE `media_fonds_related` ENGINE=InnoDB;
-ALTER TABLE `media_fonds_children` ENGINE=InnoDB;
-ALTER TABLE `media_fonds` ENGINE=InnoDB;
-ALTER TABLE `media_corpus_related` ENGINE=InnoDB;
-ALTER TABLE `media_corpus_children` ENGINE=InnoDB;
-ALTER TABLE `media_corpus` ENGINE=InnoDB;
-ALTER TABLE `media_collection_related` ENGINE=InnoDB;
-ALTER TABLE `media_collection_identifier` ENGINE=InnoDB;
-ALTER TABLE `media_collections` ENGINE=InnoDB;
-ALTER TABLE `media_analysis` ENGINE=InnoDB;
-ALTER TABLE `location_types` ENGINE=InnoDB;
-ALTER TABLE `location_relations` ENGINE=InnoDB;
-ALTER TABLE `location_aliases` ENGINE=InnoDB;
-ALTER TABLE `locations` ENGINE=InnoDB;
-ALTER TABLE `legal_rights` ENGINE=InnoDB;
-ALTER TABLE `languages` ENGINE=InnoDB;
-ALTER TABLE `jqchat_room` ENGINE=InnoDB;
-ALTER TABLE `jqchat_message` ENGINE=InnoDB;
-ALTER TABLE `ipauth_range` ENGINE=InnoDB;
-ALTER TABLE `instrument_relations` ENGINE=InnoDB;
-ALTER TABLE `instrument_alias_relations` ENGINE=InnoDB;
-ALTER TABLE `instrument_aliases` ENGINE=InnoDB;
-ALTER TABLE `instruments` ENGINE=InnoDB;
-ALTER TABLE `identifier_type` ENGINE=InnoDB;
-ALTER TABLE `googletools_siteverificationcode` ENGINE=InnoDB;
-ALTER TABLE `googletools_analyticscode` ENGINE=InnoDB;
-ALTER TABLE `generic_styles` ENGINE=InnoDB;
-ALTER TABLE `ethnic_group_aliases` ENGINE=InnoDB;
-ALTER TABLE `ethnic_groups` ENGINE=InnoDB;
-ALTER TABLE `django_site` ENGINE=InnoDB;
-ALTER TABLE `django_session` ENGINE=InnoDB;
-ALTER TABLE `django_content_type` ENGINE=InnoDB;
-ALTER TABLE `django_admin_log` ENGINE=InnoDB;
-ALTER TABLE `copy_type` ENGINE=InnoDB;
-ALTER TABLE `context_keywords` ENGINE=InnoDB;
-ALTER TABLE `auth_user_user_permissions` ENGINE=InnoDB;
-ALTER TABLE `auth_user_groups` ENGINE=InnoDB;
-ALTER TABLE `auth_user` ENGINE=InnoDB;
-ALTER TABLE `auth_permission` ENGINE=InnoDB;
-ALTER TABLE `auth_message` ENGINE=InnoDB;
-ALTER TABLE `auth_group_permissions` ENGINE=InnoDB;
-ALTER TABLE `auth_group` ENGINE=InnoDB;
-ALTER TABLE `ad_conversions` ENGINE=InnoDB;
-ALTER TABLE `acquisition_modes` ENGINE=InnoDB;
diff --git a/telemeta/util/transcode/create_thumbs.py b/telemeta/util/transcode/create_thumbs.py
deleted file mode 100755 (executable)
index dc3fd20..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/python
-
-import os, sys, string
-import logging
-
-class Logger:
-    """A logging object"""
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-log_file = 'thumbs.log'
-logger = Logger(log_file)
-root_dir = sys.argv[-1]
-args = sys.argv[1:-1]
-source_format = 'webm'
-done = []
-preview_tc = '00:00:05'
-
-if os.path.exists(log_file):
-    f = open(log_file, 'r')
-    for line in f.readlines():
-        done.append(line[:-1])
-    f.close()
-
-for root, dirs, files in os.walk(root_dir):
-    for file in files:
-        path = os.path.abspath(root + os.sep + file)
-        name, ext = os.path.splitext(file)
-        if ext[1:] == source_format:
-            dest = os.path.abspath(root + os.sep + name + '.png')
-            if not dest in done or '--force' in args:
-                command = 'ffmpeg -ss '+ preview_tc + ' -i ' + path + '  -y ' + dest
-                os.system(command)
-                logger.logger.info(dest)
-
-print "DONE!"
diff --git a/telemeta/util/transcode/remux_fix_media.py b/telemeta/util/transcode/remux_fix_media.py
deleted file mode 100755 (executable)
index 39cfd9f..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/python
-
-import os, sys, psutil
-import datetime
-from ebml.utils.ebml_data import *
-
-class FixCheckMedia(object):
-
-    def __init__(self, dir, tmp_dir):
-        self.dir = dir
-        self.tmp_dir = tmp_dir
-        if not os.path.exists(self.tmp_dir):
-            os.makedirs(self.tmp_dir)
-
-    def process(self):
-        webm_fixed_log = 'webm.fixed'
-        webm_tofix_log = 'webm.tofix'
-        mp3_fixed_log = 'mp3.fixed'
-        mp3_tofix_log = 'mp3.tofix'
-
-        for root, dirs, files in os.walk(self.dir):
-            for filename in files:
-                source = root + os.sep + filename
-                name = os.path.splitext(filename)[0]
-                ext = os.path.splitext(filename)[1][1:]
-
-                if ext == 'webm' and os.path.getsize(source):
-                    dir_files = os.listdir(root)
-
-                    if not webm_fixed_log in dir_files:
-                        print source
-                        self.fix_webm(source)
-                        f = open(root + os.sep + webm_fixed_log, 'w')
-                        f.close()
-                        if os.path.exists(root + os.sep + webm_tofix_log):
-                            os.remove(root + os.sep + webm_tofix_log)
-
-                    if mp3_tofix_log in dir_files or not mp3_fixed_log in dir_files:
-                        for file in dir_files:
-                            dest_ext = os.path.splitext(file)[1][1:]
-                            if dest_ext == 'mp3':
-                                dest = root + os.sep + file
-                                print dest
-                                self.fix_mp3(source, dest)
-                                f = open(root + os.sep + mp3_fixed_log, 'w')
-                                f.close()
-                                if os.path.exists(root + os.sep + mp3_tofix_log):
-                                    os.remove(root + os.sep + mp3_tofix_log)
-                                #break
-
-
-    def hard_fix_webm(self, path):
-        try:
-            tmp_file = self.tmp_dir + 'out.webm '
-            command = 'ffmpeg -loglevel 0 -i "'+ path + '" -vcodec libvpx -vb 500k -acodec libvorbis -aq 7 -f webm -y "' + tmp_file + '" > /dev/null'
-            print command
-            os.system(command)
-            command = 'mv '  + tmp_file + path
-            os.system(command)
-        except:
-            pass
-
-
-    def fix_webm(self, path):
-        try:
-            tmp_file = self.tmp_dir + 'out.webm'
-            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "' + path + '" -vcodec copy -acodec copy -f webm -y "' + tmp_file + '" > /dev/null'
-            print command
-            os.system(command)
-            ebml_obj = EBMLData(tmp_file)
-            offset = ebml_obj.get_first_cluster_seconds()
-            command = '/usr/local/bin/ffmpeg -loglevel 0 -ss ' + str(offset) + ' -i "' + tmp_file + '" -vcodec copy -acodec copy -f webm -y "' + path + '" > /dev/null'
-            print command
-            os.system(command)
-        except:
-            pass
-
-    def fix_mp3(self, source, path):
-        try:
-            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "'+ source + '" -vn -aq 6 -y "' + path + '" > /dev/null'
-            print command
-            os.system(command)
-        except:
-            pass
-
-def get_pids(name, args=None):
-    """Get a process pid filtered by arguments and uid"""
-    pids = []
-    for proc in psutil.process_iter():
-        if proc.cmdline:
-            if name == proc.name:
-                if args:
-                    if args in proc.cmdline:
-                        pids.append(proc.pid)
-                else:
-                    pids.append(proc.pid)
-    return pids
-
-dir = sys.argv[-2]
-tmp_dir = sys.argv[-1]
-
-path =  os.path.abspath(__file__)
-pids = get_pids('python2.6',args=path)
-
-print datetime.datetime.now()
-if len(pids) <= 1:
-    print 'starting process...'
-    f = FixCheckMedia(dir, tmp_dir)
-    f.process()
-    print 'process finished.\n'
-else:
-    print 'already started !\n'
-
diff --git a/telemeta/util/transcode/transcode.py b/telemeta/util/transcode/transcode.py
deleted file mode 100755 (executable)
index efaa113..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/python
-
-import os, sys, string
-import logging
-
-
-class Logger:
-    """A logging object"""
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-
-class TelemetaTranscode(object):
-    """docstring for TelemetaTranscode"""
-
-    threads = 4
-    source_formats = ['webm', 'mp4']
-    dest_formats = {
-                   'mp3' : '-vn -acodec libmp3lame -aq 6',
-                   'ogg' : '-vn -acodec libvorbis -aq 6',
-                   'mp4' : '-vcodec libx264 -threads ' + str(threads) + \
-                           ' -c:v libx264 -crf 17 -maxrate 1100k -bufsize 1835k -acodec libfaac -ab 96k',
-                   'png' : '',
-                   'webm' : '-vcodec libvpx -threads ' + str(threads) + \
-                           ' -c:v libvpx -crf 17 -b:v 1100k',
-                  }
-
-
-    def __init__(self, args):
-        self.args = args
-        self.log_file = args[-1]
-        self.root_dir = args[-2]
-        self.logger = Logger(self.log_file)
-
-
-    def get_ext_in_dir(self, extension, root):
-        files = os.listdir(root)
-        exts = []
-        for f in files:
-            name, ext = os.path.splitext(f)
-            ext = ext[1:]
-            if not ext in exts:
-                exts.append(ext)
-        return extension in exts
-
-    def run(self):
-        for root, dirs, files in os.walk(self.root_dir):
-            for file in files:
-                path = os.path.abspath(root + os.sep + file)
-                name, ext = os.path.splitext(file)
-                ext = ext[1:]
-                if ext in self.source_formats:
-                    for format, ffmpeg_args in self.dest_formats.iteritems():
-                        local_file = name + '.' + format
-                        dest = os.path.abspath(root + os.sep + local_file)
-                        local_files = os.listdir(root)
-                        if not (local_file in local_files or self.get_ext_in_dir(format, root)) or '--force' in self.args:
-                            if ext == 'webm' and format == 'ogg':
-                                ffmpeg_args = '-vn -acodec copy'
-                            command = 'ffmpeg -loglevel 0 -i "' + path + '" ' + ffmpeg_args + ' -y "' + dest + '"'
-                            self.logger.logger.info(command)
-                            if not '--dry-run' in self.args:
-                                os.system(command)
-                            else:
-                                print command
-
-
-if __name__ == '__main__':
-    t = TelemetaTranscode(sys.argv[1:])
-    t.run()