]> git.parisson.com Git - telemeta.git/commitdiff
Move scripts/ to bin/
authorGuillaume Pellerin <guillaume.pellerin@ircam.fr>
Sun, 29 Jul 2018 19:41:55 +0000 (21:41 +0200)
committerGuillaume Pellerin <guillaume.pellerin@ircam.fr>
Sun, 29 Jul 2018 19:41:55 +0000 (21:41 +0200)
74 files changed:
Dockerfile
app/bin/app.sh [new file with mode: 0755]
app/bin/diag.sh [new file with mode: 0755]
app/bin/enumeration.sh [new file with mode: 0755]
app/bin/init.sh [new file with mode: 0755]
app/bin/install_plugins.sh [new file with mode: 0755]
app/bin/modelviz.py [new file with mode: 0755]
app/bin/notebook.sh [new file with mode: 0755]
app/bin/setup_plugins.sh [new file with mode: 0755]
app/bin/update_schema.sh [new file with mode: 0755]
app/bin/upgrade_from_1.6_to_1.7.sh [new file with mode: 0755]
app/bin/wait.sh [new file with mode: 0755]
app/bin/worker.sh [new file with mode: 0755]
app/scripts/app.sh [deleted file]
app/scripts/diag.sh [deleted file]
app/scripts/enumeration.sh [deleted file]
app/scripts/init.sh [deleted file]
app/scripts/install_plugins.sh [deleted file]
app/scripts/modelviz.py [deleted file]
app/scripts/notebook.sh [deleted file]
app/scripts/setup_plugins.sh [deleted file]
app/scripts/update_schema.sh [deleted file]
app/scripts/upgrade_from_1.6_to_1.7.sh [deleted file]
app/scripts/wait.sh [deleted file]
app/scripts/worker.sh [deleted file]
bin/kdenlive/__init__.py [new file with mode: 0644]
bin/kdenlive/auto_fade.py [new file with mode: 0755]
bin/kdenlive/auto_fade_batch.py [new file with mode: 0755]
bin/kdenlive/fade.py [new file with mode: 0644]
bin/kdenlive/mlt_fix_threads.sh [new file with mode: 0755]
bin/kdenlive/mlt_process_batch.py [new file with mode: 0755]
bin/old/crem_checker.py [new file with mode: 0755]
bin/old/process-waveform-cgi.py [new file with mode: 0755]
bin/old/telemeta-backup.py [new file with mode: 0755]
bin/old/telemeta-crem-import-alt_ids.py [new file with mode: 0755]
bin/old/telemeta-crem-import-test.py [new file with mode: 0755]
bin/old/telemeta-crem-import.py [new file with mode: 0755]
bin/old/telemeta-media-link.py [new file with mode: 0755]
bin/sql/backup_db.sh [new file with mode: 0755]
bin/sql/convert_myisam_to_innodb.sql [new file with mode: 0644]
bin/sql/drop_timeside.sql [new file with mode: 0644]
bin/sql/fix_contentttypes.sql [new file with mode: 0644]
bin/sql/import_sql.sh [new file with mode: 0755]
bin/sql/restore_db.sh [new file with mode: 0755]
bin/transcode/create_thumbs.py [new file with mode: 0755]
bin/transcode/remux_fix_media.py [new file with mode: 0755]
bin/transcode/transcode.py [new file with mode: 0755]
bin/upgrade.sh [new file with mode: 0755]
docker-compose.yml
env/dev.yml
env/notebook.yml
scripts/kdenlive/__init__.py [deleted file]
scripts/kdenlive/auto_fade.py [deleted file]
scripts/kdenlive/auto_fade_batch.py [deleted file]
scripts/kdenlive/fade.py [deleted file]
scripts/kdenlive/mlt_fix_threads.sh [deleted file]
scripts/kdenlive/mlt_process_batch.py [deleted file]
scripts/old/crem_checker.py [deleted file]
scripts/old/process-waveform-cgi.py [deleted file]
scripts/old/telemeta-backup.py [deleted file]
scripts/old/telemeta-crem-import-alt_ids.py [deleted file]
scripts/old/telemeta-crem-import-test.py [deleted file]
scripts/old/telemeta-crem-import.py [deleted file]
scripts/old/telemeta-media-link.py [deleted file]
scripts/sql/backup_db.sh [deleted file]
scripts/sql/convert_myisam_to_innodb.sql [deleted file]
scripts/sql/drop_timeside.sql [deleted file]
scripts/sql/fix_contentttypes.sql [deleted file]
scripts/sql/import_sql.sh [deleted file]
scripts/sql/restore_db.sh [deleted file]
scripts/transcode/create_thumbs.py [deleted file]
scripts/transcode/remux_fix_media.py [deleted file]
scripts/transcode/transcode.py [deleted file]
scripts/upgrade.sh [deleted file]

index f4ad8dec3c82140e470fb4160f4b4df65f2d18ce..fea6e0e96026c5d7a79048f8cfac39355f627002 100644 (file)
@@ -36,7 +36,7 @@ RUN /bin/bash /srv/app/scripts/setup_plugins.sh
 # Install Telemeta
 RUN pip install -r requirements.txt
 RUN pip install -r requirements-dev.txt --src /srv/lib
-RUN pip uninstall -y South
+RUN pip uninstall -y south
 
 WORKDIR /srv/app
 EXPOSE 8000
diff --git a/app/bin/app.sh b/app/bin/app.sh
new file mode 100755 (executable)
index 0000000..37568a0
--- /dev/null
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+# paths
+app='/srv/app'
+manage=$app'/manage.py'
+wsgi=$app'/wsgi.py'
+static='/srv/static/'
+media='/srv/media/'
+lib='/srv/lib/'
+log='/var/log/uwsgi/app.log'
+
+# uwsgi params
+port=8000
+processes=8
+threads=8
+uid='www-data'
+gid='www-data'
+
+# starting apps
+# pip uninstall -y south
+# pip install -U django==1.8.18 django-registration-redux djangorestframework==3.6.4
+# pip install django-debug-toolbar==1.6
+# pip install -e git+https://github.com/Parisson/django-jqchat.git@dj1.8#egg=django-jqchat
+# pip install -e git+https://github.com/Parisson/saved_searches.git@dj1.8#egg=saved_searches-2.0.0-beta
+
+# waiting for other network services
+sh $app/bin/wait.sh
+python $manage wait-for-db
+
+if [ ! -f .init ]; then
+    python $manage migrate --noinput
+    python $manage bower_install -- --allow-root
+    touch .init
+fi
+
+# telemeta setup
+python $manage telemeta-create-admin-user
+python $manage telemeta-create-boilerplate
+python $manage telemeta-setup-enumerations
+
+
+# Delete Timeside database if it exists
+cat /srv/lib/telemeta/bin/sql/drop_timeside.sql | python $manage dbshell
+
+if [ $REINDEX = "True" ]; then
+    python $manage rebuild_index --noinput
+fi
+
+# choose dev or prod mode
+if [ "$1" = "--runserver" ]; then
+    python $manage runserver 0.0.0.0:8000
+else
+    # static files auto update
+    # watchmedo shell-command --patterns="$patterns" --recursive \
+    #     --command='python '$manage' collectstatic --noinput' $lib &
+    python $manage collectstatic --noinput
+
+    # fix media access rights
+    find $media -maxdepth 1 -path ${media}import -prune -o -type d -not -user www-data -exec chown www-data:www-data {} \;
+
+    # app start
+    uwsgi --socket :$port --wsgi-file $wsgi --chdir $app --master \
+        --processes $processes --threads $threads \
+        --uid $uid --gid $gid --logto $log --touch-reload $wsgi
+fi
diff --git a/app/bin/diag.sh b/app/bin/diag.sh
new file mode 100755 (executable)
index 0000000..44d0552
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+app="telemeta"
+dir="../../doc/devel"
+
+python modelviz.py -a > $dir/$app-all.dot
+python modelviz.py $app > $dir/$app.dot
+
+dot $dir/$app-all.dot -Tpdf -o $dir/$app-all.pdf
+dot $dir/$app.dot -Tpdf -o $dir/$app.pdf
+
+rsync -a $dir/ doc.parisson.com:/var/www/files/doc/$app/diagram/
diff --git a/app/bin/enumeration.sh b/app/bin/enumeration.sh
new file mode 100755 (executable)
index 0000000..c6a42b0
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+ pwd
+
+   chmod 777 "enumeration/enumeration.txt" && echo "The file is now writable"
diff --git a/app/bin/init.sh b/app/bin/init.sh
new file mode 100755 (executable)
index 0000000..21bd3d5
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+app='/srv/app'
+manage=$app'/manage.py'
+
+python $manage migrate --noinput
+python $manage telemeta-create-admin-user
+python $manage telemeta-create-boilerplate
+python $manage bower_install -- --allow-root
diff --git a/app/bin/install_plugins.sh b/app/bin/install_plugins.sh
new file mode 100755 (executable)
index 0000000..b0315e9
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+plugins=/srv/lib/plugins
+
+for dir in $(ls $plugins); do
+    if [ -f $plugins/$dir/setup.py ]; then
+        pip install -e $plugins/$dir/.
+    fi
+done
diff --git a/app/bin/modelviz.py b/app/bin/modelviz.py
new file mode 100755 (executable)
index 0000000..24af062
--- /dev/null
@@ -0,0 +1,359 @@
+#!/usr/bin/env python
+
+"""Django model to DOT (Graphviz) converter
+by Antonio Cavedoni <antonio@cavedoni.org>
+
+Make sure your DJANGO_SETTINGS_MODULE is set to your project or
+place this script in the same directory of the project and call
+the script like this:
+
+$ python modelviz.py [-h] [-a] [-d] [-g] [-n] [-L <language>] [-i <model_names>] <app_label> ... <app_label> > <filename>.dot
+$ dot <filename>.dot -Tpng -o <filename>.png
+
+options:
+    -h, --help
+    show this help message and exit.
+
+    -a, --all_applications
+    show models from all applications.
+
+    -d, --disable_fields
+    don't show the class member fields.
+
+    -g, --group_models
+    draw an enclosing box around models from the same app.
+
+    -i, --include_models=User,Person,Car
+    only include selected models in graph.
+
+    -n, --verbose_names
+    use verbose_name for field and models.
+
+    -L, --language
+    specify language used for verbose_name localization
+
+    -x, --exclude_columns
+    exclude specific column(s) from the graph.
+
+    -X, --exclude_models
+    exclude specific model(s) from the graph.
+    
+    -e, --inheritance
+    show inheritance arrows.
+"""
+__version__ = "0.9"
+__svnid__ = "$Id$"
+__license__ = "Python"
+__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
+__contributors__ = [
+   "Stefano J. Attardi <http://attardi.org/>",
+   "limodou <http://www.donews.net/limodou/>",
+   "Carlo C8E Miron",
+   "Andre Campos <cahenan@gmail.com>",
+   "Justin Findlay <jfindlay@gmail.com>",
+   "Alexander Houben <alexander@houben.ch>",
+   "Bas van Oostveen <v.oostveen@gmail.com>",
+   "Joern Hees <gitdev@joernhees.de>"
+]
+
+import os
+import sys
+import getopt
+
+from django.core.management import setup_environ
+
+try:
+    import settings
+except ImportError:
+    pass
+else:
+    setup_environ(settings)
+
+from django.utils.translation import activate as activate_language
+from django.utils.safestring import mark_safe
+from django.template import Template, Context, loader
+from django.db import models
+from django.db.models import get_models
+from django.db.models.fields.related import \
+    ForeignKey, OneToOneField, ManyToManyField, RelatedField
+
+try:
+    from django.db.models.fields.generic import GenericRelation
+except ImportError:
+    from django.contrib.contenttypes.generic import GenericRelation
+
+def parse_file_or_list(arg):
+    if not arg:
+        return []
+    if not ',' in arg and os.path.isfile(arg):
+        return [e.strip() for e in open(arg).readlines()]
+    return arg.split(',')
+
+
+def generate_dot(app_labels, **kwargs):
+    disable_fields = kwargs.get('disable_fields', False)
+    include_models = parse_file_or_list(kwargs.get('include_models', ""))
+    all_applications = kwargs.get('all_applications', False)
+    use_subgraph = kwargs.get('group_models', False)
+    verbose_names = kwargs.get('verbose_names', False)
+    inheritance = kwargs.get('inheritance', False)
+    language = kwargs.get('language', None)
+    if language is not None:
+        activate_language(language)
+    exclude_columns = parse_file_or_list(kwargs.get('exclude_columns', ""))
+    exclude_models = parse_file_or_list(kwargs.get('exclude_models', ""))
+
+    def skip_field(field):
+        if exclude_columns:
+            if verbose_names and field.verbose_name:
+                if field.verbose_name in exclude_columns:
+                    return True
+            if field.name in exclude_columns:
+                return True
+        return False
+
+
+
+
+    t = loader.get_template('django_extensions/graph_models/head.html')
+    c = Context({})
+    dot = t.render(c)
+
+    apps = []
+    if all_applications:
+        apps = models.get_apps()
+
+    for app_label in app_labels:
+        app = models.get_app(app_label)
+        if not app in apps:
+            apps.append(app)
+
+    graphs = []
+    for app in apps:
+        graph = Context({
+            'name': '"%s"' % app.__name__,
+            'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
+            'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
+            'disable_fields': disable_fields,
+            'use_subgraph': use_subgraph,
+            'models': []
+        })
+
+        appmodels = get_models(app)
+        abstract_models = []
+        for appmodel in appmodels:
+            abstract_models = abstract_models + [abstract_model for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]
+        abstract_models = list(set(abstract_models)) # remove duplicates
+        appmodels = abstract_models + appmodels
+        
+
+        for appmodel in appmodels:
+            appmodel_abstracts = [abstract_model.__name__ for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]
+
+            # collect all attribs of abstract superclasses
+            def getBasesAbstractFields(c):
+                _abstract_fields = []
+                for e in c.__bases__:
+                    if hasattr(e, '_meta') and e._meta.abstract:
+                        _abstract_fields.extend(e._meta.fields)
+                        _abstract_fields.extend(getBasesAbstractFields(e))
+                return _abstract_fields
+            abstract_fields = getBasesAbstractFields(appmodel)
+
+            model = {
+                'app_name': appmodel.__module__.replace(".", "_"),
+                'name': appmodel.__name__,
+                'abstracts': appmodel_abstracts,
+                'fields': [],
+                'relations': []
+            }
+
+            # consider given model name ?
+            def consider(model_name):
+                if exclude_models and model_name in exclude_models:
+                    return False
+                return not include_models or model_name in include_models
+
+            if not consider(appmodel._meta.object_name):
+                continue
+
+            if verbose_names and appmodel._meta.verbose_name:
+                model['label'] = appmodel._meta.verbose_name
+            else:
+                model['label'] = model['name']
+
+            # model attributes
+            def add_attributes(field):
+                if verbose_names and field.verbose_name:
+                    label = field.verbose_name
+                else:
+                    label = field.name
+
+                t = type(field).__name__
+                if isinstance(field, (OneToOneField, ForeignKey)):
+                    t += " ({0})".format(field.rel.field_name)
+                # TODO: ManyToManyField, GenericRelation
+
+                model['fields'].append({
+                    'name': field.name,
+                    'label': label,
+                    'type': t,
+                    'blank': field.blank,
+                    'abstract': field in abstract_fields,
+                })
+
+            # Find all the real attributes. Relations are depicted as graph edges instead of attributes
+            attributes = [field for field in appmodel._meta.local_fields if not isinstance(field, RelatedField)]
+
+            # find primary key and print it first, ignoring implicit id if other pk exists
+            pk = appmodel._meta.pk
+            if not appmodel._meta.abstract and pk in attributes:
+                add_attributes(pk)
+            for field in attributes:
+                if skip_field(field):
+                    continue
+                if not field.primary_key:
+                    add_attributes(field)
+            
+            # FIXME: actually many_to_many fields aren't saved in this model's db table, so why should we add an attribute-line for them in the resulting graph?
+            #if appmodel._meta.many_to_many:
+            #    for field in appmodel._meta.many_to_many:
+            #        if skip_field(field):
+            #            continue
+            #        add_attributes(field)
+
+            # relations
+            def add_relation(field, extras=""):
+                if verbose_names and field.verbose_name:
+                    label = field.verbose_name
+                else:
+                    label = field.name
+                    
+                # show related field name
+                if hasattr(field, 'related_query_name'):
+                    label += ' (%s)' % field.related_query_name()
+
+                _rel = {
+                    'target_app': field.rel.to.__module__.replace('.', '_'),
+                    'target': field.rel.to.__name__,
+                    'type': type(field).__name__,
+                    'name': field.name,
+                    'label': label,
+                    'arrows': extras,
+                    'needs_node': True
+                }
+                if _rel not in model['relations'] and consider(_rel['target']):
+                    model['relations'].append(_rel)
+
+            for field in appmodel._meta.local_fields:
+                if field.attname.endswith('_ptr_id'): # excluding field redundant with inheritance relation
+                    continue
+                if field in abstract_fields: # excluding fields inherited from abstract classes. they too show as local_fields
+                    continue
+                if skip_field(field):
+                    continue
+                if isinstance(field, OneToOneField):
+                    add_relation(field, '[arrowhead=none, arrowtail=none]')
+                elif isinstance(field, ForeignKey):
+                    add_relation(field, '[arrowhead=none, arrowtail=dot]')
+
+            for field in appmodel._meta.local_many_to_many:
+                if skip_field(field):
+                    continue
+                if isinstance(field, ManyToManyField):
+                    if (getattr(field, 'creates_table', False) or  # django 1.1.
+                        (hasattr(field.rel.through, '_meta') and field.rel.through._meta.auto_created)):  # django 1.2
+                        add_relation(field, '[arrowhead=dot arrowtail=dot, dir=both]')
+                    elif isinstance(field, GenericRelation):
+                        add_relation(field, mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'))
+            
+            if inheritance:
+                # add inheritance arrows
+                for parent in appmodel.__bases__:
+                    if hasattr(parent, "_meta"): # parent is a model
+                        l = "multi-table"
+                        if parent._meta.abstract:
+                            l = "abstract"
+                        if appmodel._meta.proxy:
+                            l = "proxy"
+                        l += r"\ninheritance"
+                        _rel = {
+                            'target_app': parent.__module__.replace(".", "_"),
+                            'target': parent.__name__,
+                            'type': "inheritance",
+                            'name': "inheritance",
+                            'label': l,
+                            'arrows': '[arrowhead=empty, arrowtail=none]',
+                            'needs_node': True
+                        }
+                        # TODO: seems as if abstract models aren't part of models.getModels, which is why they are printed by this without any attributes.
+                        if _rel not in model['relations'] and consider(_rel['target']):
+                            model['relations'].append(_rel)
+            
+            graph['models'].append(model)
+        graphs.append(graph)
+
+    nodes = []
+    for graph in graphs:
+        nodes.extend([e['name'] for e in graph['models']])
+
+    for graph in graphs:
+        # don't draw duplication nodes because of relations
+        for model in graph['models']:
+            for relation in model['relations']:
+                if relation['target'] in nodes:
+                    relation['needs_node'] = False
+        # render templates
+        t = loader.get_template('django_extensions/graph_models/body.html')
+        dot += '\n' + t.render(graph)
+
+    for graph in graphs:
+        t = loader.get_template('django_extensions/graph_models/rel.html')
+        dot += '\n' + t.render(graph)
+
+
+    t = loader.get_template('django_extensions/graph_models/tail.html')
+    c = Context({})
+    dot += '\n' + t.render(c)
+    return dot
+
+def main():
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "hadgi:L:x:X:en",
+                    ["help", "all_applications", "disable_fields", "group_models", "include_models=", "inheritance", "verbose_names", "language=", "exclude_columns=", "exclude_models="])
+    except getopt.GetoptError, error:
+        print __doc__
+        sys.exit(error)
+
+    kwargs = {}
+    for opt, arg in opts:
+        if opt in ("-h", "--help"):
+            print __doc__
+            sys.exit()
+        if opt in ("-a", "--all_applications"):
+            kwargs['all_applications'] = True
+        if opt in ("-d", "--disable_fields"):
+            kwargs['disable_fields'] = True
+        if opt in ("-g", "--group_models"):
+            kwargs['group_models'] = True
+        if opt in ("-i", "--include_models"):
+            kwargs['include_models'] = arg
+        if opt in ("-e", "--inheritance"):
+            kwargs['inheritance'] = True
+        if opt in ("-n", "--verbose-names"):
+            kwargs['verbose_names'] = True
+        if opt in ("-L", "--language"):
+            kwargs['language'] = arg
+        if opt in ("-x", "--exclude_columns"):
+            kwargs['exclude_columns'] = arg
+        if opt in ("-X", "--exclude_models"):
+            kwargs['exclude_models'] = arg
+
+    if not args and not kwargs.get('all_applications', False):
+        print __doc__
+        sys.exit()
+
+    print generate_dot(args, **kwargs)
+
+if __name__ == "__main__":
+    main()
diff --git a/app/bin/notebook.sh b/app/bin/notebook.sh
new file mode 100755 (executable)
index 0000000..562f9df
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+export PYTHONPATH=$PYTHONPATH:/opt/miniconda/lib/python2.7/site-packages/:/srv/app/
+export DJANGO_SETTINGS_MODULE=settings
+
+python /srv/app/manage.py shell_plus --notebook
diff --git a/app/bin/setup_plugins.sh b/app/bin/setup_plugins.sh
new file mode 100755 (executable)
index 0000000..de55f84
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+plugins=/srv/lib/plugins
+
+apt-get update
+
+for dir in $(ls $plugins); do
+    env=$plugins/$dir/conda-environment.yml
+    if [ -f $env ]; then
+        conda env update --name root --file $env
+    fi
+    req=$plugins/$dir/debian-requirements.txt
+    if [ -f $req ]; then
+        packs=$(egrep -v "^\s*(#|$)" $req)
+        apt-get install -y --force-yes $packs
+    fi
+    if [ -f $plugins/$dir/setup.py ]; then
+        pip install -e $plugins/$dir/.
+    fi
+done
+
+apt-get clean
diff --git a/app/bin/update_schema.sh b/app/bin/update_schema.sh
new file mode 100755 (executable)
index 0000000..1b64fa3
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+
+./manage.py schemamigration telemeta --auto
+./manage.py migrate telemeta
diff --git a/app/bin/upgrade_from_1.6_to_1.7.sh b/app/bin/upgrade_from_1.6_to_1.7.sh
new file mode 100755 (executable)
index 0000000..af74807
--- /dev/null
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+python manage.py migrate
+python manage.py migrate contenttypes --fake-initial
+python manage.py migrate --fake-initial
+python manage.py migrate thumbnail --fake-initial
+python manage.py migrate --fake telemeta 0006
diff --git a/app/bin/wait.sh b/app/bin/wait.sh
new file mode 100755 (executable)
index 0000000..f2a93f7
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# apt-get install -y --force-yes netcat
+
+set -e
+
+host=$(env | grep _TCP_ADDR | cut -d = -f 2)
+port=$(env | grep _TCP_PORT | cut -d = -f 2)
+
+echo -n "waiting for TCP connection to $host:$port..."
+
+while ! nc -w 1 $host $port 2>/dev/null
+do
+  echo -n .
+  sleep 1
+done
+
+echo 'ok'
diff --git a/app/bin/worker.sh b/app/bin/worker.sh
new file mode 100755 (executable)
index 0000000..3eb7b40
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# paths
+app='/srv/app'
+manage=$app'/manage.py'
+wsgi=$app'/wsgi.py'
+concurrency=12
+
+# starting apps
+# pip uninstall -y south
+# pip install -U django==1.8.18 django-registration-redux djangorestframework==3.6.4
+# pip install django-debug-toolbar==1.6
+# pip install -e git+https://github.com/Parisson/django-jqchat.git@dj1.8#egg=django-jqchat
+# pip install -e git+https://github.com/Parisson/saved_searches.git@dj1.8#egg=saved_searches-2.0.0-beta
+
+# waiting for other services
+bash $app/bin/wait.sh
+
+# Starting celery worker with the --autoreload option will enable the worker to watch for file system changes
+# This is an experimental feature intended for use in development only
+# see http://celery.readthedocs.org/en/latest/userguide/workers.html#autoreloading
+python $manage celery worker --autoreload -A worker --concurrency=$concurrency
diff --git a/app/scripts/app.sh b/app/scripts/app.sh
deleted file mode 100755 (executable)
index 6f5271e..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/bash
-
-# paths
-app='/srv/app'
-manage=$app'/manage.py'
-wsgi=$app'/wsgi.py'
-static='/srv/static/'
-media='/srv/media/'
-lib='/srv/lib/'
-log='/var/log/uwsgi/app.log'
-
-# uwsgi params
-port=8000
-processes=8
-threads=8
-uid='www-data'
-gid='www-data'
-
-# stating apps
-# pip uninstall -y south
-# pip install -U django==1.8.18 django-registration-redux djangorestframework==3.6.4
-# pip install django-debug-toolbar==1.6
-# pip install -e git+https://github.com/Parisson/django-jqchat.git@dj1.8#egg=django-jqchat
-# pip install -e git+https://github.com/Parisson/saved_searches.git@dj1.8#egg=saved_searches-2.0.0-beta
-
-# waiting for other network services
-sh $app/scripts/wait.sh
-python $manage wait-for-db
-
-if [ ! -f .init ]; then
-    python $manage migrate --noinput
-    python $manage bower_install -- --allow-root
-    touch .init
-fi
-
-# telemeta setup
-python $manage telemeta-create-admin-user
-python $manage telemeta-create-boilerplate
-python $manage telemeta-setup-enumerations
-
-
-# Delete Timeside database if it exists
-cat /srv/lib/telemeta/scripts/sql/drop_timeside.sql | python $manage dbshell
-
-if [ $REINDEX = "True" ]; then
-    python $manage rebuild_index --noinput
-fi
-
-# choose dev or prod mode
-if [ "$1" = "--runserver" ]; then
-    python $manage runserver 0.0.0.0:8000
-else
-    # static files auto update
-    # watchmedo shell-command --patterns="$patterns" --recursive \
-    #     --command='python '$manage' collectstatic --noinput' $lib &
-    python $manage collectstatic --noinput
-
-    # fix media access rights
-    find $media -maxdepth 1 -path ${media}import -prune -o -type d -not -user www-data -exec chown www-data:www-data {} \;
-
-    # app start
-    uwsgi --socket :$port --wsgi-file $wsgi --chdir $app --master \
-        --processes $processes --threads $threads \
-        --uid $uid --gid $gid --logto $log --touch-reload $wsgi
-fi
diff --git a/app/scripts/diag.sh b/app/scripts/diag.sh
deleted file mode 100755 (executable)
index 44d0552..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-app="telemeta"
-dir="../../doc/devel"
-
-python modelviz.py -a > $dir/$app-all.dot
-python modelviz.py $app > $dir/$app.dot
-
-dot $dir/$app-all.dot -Tpdf -o $dir/$app-all.pdf
-dot $dir/$app.dot -Tpdf -o $dir/$app.pdf
-
-rsync -a $dir/ doc.parisson.com:/var/www/files/doc/$app/diagram/
diff --git a/app/scripts/enumeration.sh b/app/scripts/enumeration.sh
deleted file mode 100755 (executable)
index c6a42b0..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
- pwd
-
-   chmod 777 "enumeration/enumeration.txt" && echo "The file is now writable"
diff --git a/app/scripts/init.sh b/app/scripts/init.sh
deleted file mode 100755 (executable)
index 21bd3d5..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-app='/srv/app'
-manage=$app'/manage.py'
-
-python $manage migrate --noinput
-python $manage telemeta-create-admin-user
-python $manage telemeta-create-boilerplate
-python $manage bower_install -- --allow-root
diff --git a/app/scripts/install_plugins.sh b/app/scripts/install_plugins.sh
deleted file mode 100755 (executable)
index b0315e9..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-plugins=/srv/lib/plugins
-
-for dir in $(ls $plugins); do
-    if [ -f $plugins/$dir/setup.py ]; then
-        pip install -e $plugins/$dir/.
-    fi
-done
diff --git a/app/scripts/modelviz.py b/app/scripts/modelviz.py
deleted file mode 100755 (executable)
index 24af062..0000000
+++ /dev/null
@@ -1,359 +0,0 @@
-#!/usr/bin/env python
-
-"""Django model to DOT (Graphviz) converter
-by Antonio Cavedoni <antonio@cavedoni.org>
-
-Make sure your DJANGO_SETTINGS_MODULE is set to your project or
-place this script in the same directory of the project and call
-the script like this:
-
-$ python modelviz.py [-h] [-a] [-d] [-g] [-n] [-L <language>] [-i <model_names>] <app_label> ... <app_label> > <filename>.dot
-$ dot <filename>.dot -Tpng -o <filename>.png
-
-options:
-    -h, --help
-    show this help message and exit.
-
-    -a, --all_applications
-    show models from all applications.
-
-    -d, --disable_fields
-    don't show the class member fields.
-
-    -g, --group_models
-    draw an enclosing box around models from the same app.
-
-    -i, --include_models=User,Person,Car
-    only include selected models in graph.
-
-    -n, --verbose_names
-    use verbose_name for field and models.
-
-    -L, --language
-    specify language used for verrbose_name localization
-
-    -x, --exclude_columns
-    exclude specific column(s) from the graph.
-
-    -X, --exclude_models
-    exclude specific model(s) from the graph.
-    
-    -e, --inheritance
-    show inheritance arrows.
-"""
-__version__ = "0.9"
-__svnid__ = "$Id$"
-__license__ = "Python"
-__author__ = "Antonio Cavedoni <http://cavedoni.com/>"
-__contributors__ = [
-   "Stefano J. Attardi <http://attardi.org/>",
-   "limodou <http://www.donews.net/limodou/>",
-   "Carlo C8E Miron",
-   "Andre Campos <cahenan@gmail.com>",
-   "Justin Findlay <jfindlay@gmail.com>",
-   "Alexander Houben <alexander@houben.ch>",
-   "Bas van Oostveen <v.oostveen@gmail.com>",
-   "Joern Hees <gitdev@joernhees.de>"
-]
-
-import os
-import sys
-import getopt
-
-from django.core.management import setup_environ
-
-try:
-    import settings
-except ImportError:
-    pass
-else:
-    setup_environ(settings)
-
-from django.utils.translation import activate as activate_language
-from django.utils.safestring import mark_safe
-from django.template import Template, Context, loader
-from django.db import models
-from django.db.models import get_models
-from django.db.models.fields.related import \
-    ForeignKey, OneToOneField, ManyToManyField, RelatedField
-
-try:
-    from django.db.models.fields.generic import GenericRelation
-except ImportError:
-    from django.contrib.contenttypes.generic import GenericRelation
-
-def parse_file_or_list(arg):
-    if not arg:
-        return []
-    if not ',' in arg and os.path.isfile(arg):
-        return [e.strip() for e in open(arg).readlines()]
-    return arg.split(',')
-
-
-def generate_dot(app_labels, **kwargs):
-    disable_fields = kwargs.get('disable_fields', False)
-    include_models = parse_file_or_list(kwargs.get('include_models', ""))
-    all_applications = kwargs.get('all_applications', False)
-    use_subgraph = kwargs.get('group_models', False)
-    verbose_names = kwargs.get('verbose_names', False)
-    inheritance = kwargs.get('inheritance', False)
-    language = kwargs.get('language', None)
-    if language is not None:
-        activate_language(language)
-    exclude_columns = parse_file_or_list(kwargs.get('exclude_columns', ""))
-    exclude_models = parse_file_or_list(kwargs.get('exclude_models', ""))
-
-    def skip_field(field):
-        if exclude_columns:
-            if verbose_names and field.verbose_name:
-                if field.verbose_name in exclude_columns:
-                    return True
-            if field.name in exclude_columns:
-                return True
-        return False
-
-
-
-
-    t = loader.get_template('django_extensions/graph_models/head.html')
-    c = Context({})
-    dot = t.render(c)
-
-    apps = []
-    if all_applications:
-        apps = models.get_apps()
-
-    for app_label in app_labels:
-        app = models.get_app(app_label)
-        if not app in apps:
-            apps.append(app)
-
-    graphs = []
-    for app in apps:
-        graph = Context({
-            'name': '"%s"' % app.__name__,
-            'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]),
-            'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"),
-            'disable_fields': disable_fields,
-            'use_subgraph': use_subgraph,
-            'models': []
-        })
-
-        appmodels = get_models(app)
-        abstract_models = []
-        for appmodel in appmodels:
-            abstract_models = abstract_models + [abstract_model for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]
-        abstract_models = list(set(abstract_models)) # remove duplicates
-        appmodels = abstract_models + appmodels
-        
-
-        for appmodel in appmodels:
-            appmodel_abstracts = [abstract_model.__name__ for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract]
-
-            # collect all attribs of abstract superclasses
-            def getBasesAbstractFields(c):
-                _abstract_fields = []
-                for e in c.__bases__:
-                    if hasattr(e, '_meta') and e._meta.abstract:
-                        _abstract_fields.extend(e._meta.fields)
-                        _abstract_fields.extend(getBasesAbstractFields(e))
-                return _abstract_fields
-            abstract_fields = getBasesAbstractFields(appmodel)
-
-            model = {
-                'app_name': appmodel.__module__.replace(".", "_"),
-                'name': appmodel.__name__,
-                'abstracts': appmodel_abstracts,
-                'fields': [],
-                'relations': []
-            }
-
-            # consider given model name ?
-            def consider(model_name):
-                if exclude_models and model_name in exclude_models:
-                    return False
-                return not include_models or model_name in include_models
-
-            if not consider(appmodel._meta.object_name):
-                continue
-
-            if verbose_names and appmodel._meta.verbose_name:
-                model['label'] = appmodel._meta.verbose_name
-            else:
-                model['label'] = model['name']
-
-            # model attributes
-            def add_attributes(field):
-                if verbose_names and field.verbose_name:
-                    label = field.verbose_name
-                else:
-                    label = field.name
-
-                t = type(field).__name__
-                if isinstance(field, (OneToOneField, ForeignKey)):
-                    t += " ({0})".format(field.rel.field_name)
-                # TODO: ManyToManyField, GenericRelation
-
-                model['fields'].append({
-                    'name': field.name,
-                    'label': label,
-                    'type': t,
-                    'blank': field.blank,
-                    'abstract': field in abstract_fields,
-                })
-
-            # Find all the real attributes. Relations are depicted as graph edges instead of attributes
-            attributes = [field for field in appmodel._meta.local_fields if not isinstance(field, RelatedField)]
-
-            # find primary key and print it first, ignoring implicit id if other pk exists
-            pk = appmodel._meta.pk
-            if not appmodel._meta.abstract and pk in attributes:
-                add_attributes(pk)
-            for field in attributes:
-                if skip_field(field):
-                    continue
-                if not field.primary_key:
-                    add_attributes(field)
-            
-            # FIXME: actually many_to_many fields aren't saved in this model's db table, so why should we add an attribute-line for them in the resulting graph?
-            #if appmodel._meta.many_to_many:
-            #    for field in appmodel._meta.many_to_many:
-            #        if skip_field(field):
-            #            continue
-            #        add_attributes(field)
-
-            # relations
-            def add_relation(field, extras=""):
-                if verbose_names and field.verbose_name:
-                    label = field.verbose_name
-                else:
-                    label = field.name
-                    
-                # show related field name
-                if hasattr(field, 'related_query_name'):
-                    label += ' (%s)' % field.related_query_name()
-
-                _rel = {
-                    'target_app': field.rel.to.__module__.replace('.', '_'),
-                    'target': field.rel.to.__name__,
-                    'type': type(field).__name__,
-                    'name': field.name,
-                    'label': label,
-                    'arrows': extras,
-                    'needs_node': True
-                }
-                if _rel not in model['relations'] and consider(_rel['target']):
-                    model['relations'].append(_rel)
-
-            for field in appmodel._meta.local_fields:
-                if field.attname.endswith('_ptr_id'): # excluding field redundant with inheritance relation
-                    continue
-                if field in abstract_fields: # excluding fields inherited from abstract classes. they too show as local_fields
-                    continue
-                if skip_field(field):
-                    continue
-                if isinstance(field, OneToOneField):
-                    add_relation(field, '[arrowhead=none, arrowtail=none]')
-                elif isinstance(field, ForeignKey):
-                    add_relation(field, '[arrowhead=none, arrowtail=dot]')
-
-            for field in appmodel._meta.local_many_to_many:
-                if skip_field(field):
-                    continue
-                if isinstance(field, ManyToManyField):
-                    if (getattr(field, 'creates_table', False) or  # django 1.1.
-                        (hasattr(field.rel.through, '_meta') and field.rel.through._meta.auto_created)):  # django 1.2
-                        add_relation(field, '[arrowhead=dot arrowtail=dot, dir=both]')
-                    elif isinstance(field, GenericRelation):
-                        add_relation(field, mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'))
-            
-            if inheritance:
-                # add inheritance arrows
-                for parent in appmodel.__bases__:
-                    if hasattr(parent, "_meta"): # parent is a model
-                        l = "multi-table"
-                        if parent._meta.abstract:
-                            l = "abstract"
-                        if appmodel._meta.proxy:
-                            l = "proxy"
-                        l += r"\ninheritance"
-                        _rel = {
-                            'target_app': parent.__module__.replace(".", "_"),
-                            'target': parent.__name__,
-                            'type': "inheritance",
-                            'name': "inheritance",
-                            'label': l,
-                            'arrows': '[arrowhead=empty, arrowtail=none]',
-                            'needs_node': True
-                        }
-                        # TODO: seems as if abstract models aren't part of models.getModels, which is why they are printed by this without any attributes.
-                        if _rel not in model['relations'] and consider(_rel['target']):
-                            model['relations'].append(_rel)
-            
-            graph['models'].append(model)
-        graphs.append(graph)
-
-    nodes = []
-    for graph in graphs:
-        nodes.extend([e['name'] for e in graph['models']])
-
-    for graph in graphs:
-        # don't draw duplication nodes because of relations
-        for model in graph['models']:
-            for relation in model['relations']:
-                if relation['target'] in nodes:
-                    relation['needs_node'] = False
-        # render templates
-        t = loader.get_template('django_extensions/graph_models/body.html')
-        dot += '\n' + t.render(graph)
-
-    for graph in graphs:
-        t = loader.get_template('django_extensions/graph_models/rel.html')
-        dot += '\n' + t.render(graph)
-
-
-    t = loader.get_template('django_extensions/graph_models/tail.html')
-    c = Context({})
-    dot += '\n' + t.render(c)
-    return dot
-
-def main():
-    try:
-        opts, args = getopt.getopt(sys.argv[1:], "hadgi:L:x:X:en",
-                    ["help", "all_applications", "disable_fields", "group_models", "include_models=", "inheritance", "verbose_names", "language=", "exclude_columns=", "exclude_models="])
-    except getopt.GetoptError, error:
-        print __doc__
-        sys.exit(error)
-
-    kwargs = {}
-    for opt, arg in opts:
-        if opt in ("-h", "--help"):
-            print __doc__
-            sys.exit()
-        if opt in ("-a", "--all_applications"):
-            kwargs['all_applications'] = True
-        if opt in ("-d", "--disable_fields"):
-            kwargs['disable_fields'] = True
-        if opt in ("-g", "--group_models"):
-            kwargs['group_models'] = True
-        if opt in ("-i", "--include_models"):
-            kwargs['include_models'] = arg
-        if opt in ("-e", "--inheritance"):
-            kwargs['inheritance'] = True
-        if opt in ("-n", "--verbose-names"):
-            kwargs['verbose_names'] = True
-        if opt in ("-L", "--language"):
-            kwargs['language'] = arg
-        if opt in ("-x", "--exclude_columns"):
-            kwargs['exclude_columns'] = arg
-        if opt in ("-X", "--exclude_models"):
-            kwargs['exclude_models'] = arg
-
-    if not args and not kwargs.get('all_applications', False):
-        print __doc__
-        sys.exit()
-
-    print generate_dot(args, **kwargs)
-
-if __name__ == "__main__":
-    main()
diff --git a/app/scripts/notebook.sh b/app/scripts/notebook.sh
deleted file mode 100755 (executable)
index 562f9df..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-export PYTHONPATH=$PYTHONPATH:/opt/miniconda/lib/python2.7/site-packages/:/srv/app/
-export DJANGO_SETTINGS_MODULE=settings
-
-python /srv/app/manage.py shell_plus --notebook
diff --git a/app/scripts/setup_plugins.sh b/app/scripts/setup_plugins.sh
deleted file mode 100755 (executable)
index de55f84..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-plugins=/srv/lib/plugins
-
-apt-get update
-
-for dir in $(ls $plugins); do
-    env=$plugins/$dir/conda-environment.yml
-    if [ -f $env ]; then
-        conda env update --name root --file $env
-    fi
-    req=$plugins/$dir/debian-requirements.txt
-    if [ -f $req ]; then
-        packs=$(egrep -v "^\s*(#|$)" $req)
-        apt-get install -y --force-yes $packs
-    fi
-    if [ -f $plugins/$dir/setup.py ]; then
-        pip install -e $plugins/$dir/.
-    fi
-done
-
-apt-get clean
diff --git a/app/scripts/update_schema.sh b/app/scripts/update_schema.sh
deleted file mode 100755 (executable)
index 1b64fa3..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-./manage.py schemamigration telemeta --auto
-./manage.py migrate telemeta
diff --git a/app/scripts/upgrade_from_1.6_to_1.7.sh b/app/scripts/upgrade_from_1.6_to_1.7.sh
deleted file mode 100755 (executable)
index af74807..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-python manage.py migrate
-python manage.py migrate contenttypes --fake-initial
-python manage.py migrate --fake-initial
-python manage.py migrate thumbnail --fake-initial
-python manage.py migrate --fake telemeta 0006
diff --git a/app/scripts/wait.sh b/app/scripts/wait.sh
deleted file mode 100755 (executable)
index f2a93f7..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# apt-get install -y --force-yes netcat
-
-set -e
-
-host=$(env | grep _TCP_ADDR | cut -d = -f 2)
-port=$(env | grep _TCP_PORT | cut -d = -f 2)
-
-echo -n "waiting for TCP connection to $host:$port..."
-
-while ! nc -w 1 $host $port 2>/dev/null
-do
-  echo -n .
-  sleep 1
-done
-
-echo 'ok'
diff --git a/app/scripts/worker.sh b/app/scripts/worker.sh
deleted file mode 100755 (executable)
index 15ef822..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# paths
-app='/srv/app'
-manage=$app'/manage.py'
-wsgi=$app'/wsgi.py'
-concurrency=12
-
-# stating apps
-# pip uninstall -y south
-# pip install -U django==1.8.18 django-registration-redux djangorestframework==3.6.4
-# pip install django-debug-toolbar==1.6
-# pip install -e git+https://github.com/Parisson/django-jqchat.git@dj1.8#egg=django-jqchat
-# pip install -e git+https://github.com/Parisson/saved_searches.git@dj1.8#egg=saved_searches-2.0.0-beta
-
-# waiting for other services
-bash $app/scripts/wait.sh
-
-# Starting celery worker with the --autoreload option will enable the worker to watch for file system changes
-# This is an experimental feature intended for use in development only
-# see http://celery.readthedocs.org/en/latest/userguide/workers.html#autoreloading
-python $manage celery worker --autoreload -A worker --concurrency=$concurrency
diff --git a/bin/kdenlive/__init__.py b/bin/kdenlive/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/bin/kdenlive/auto_fade.py b/bin/kdenlive/auto_fade.py
new file mode 100755 (executable)
index 0000000..c716a8c
--- /dev/null
@@ -0,0 +1,11 @@
+#/usr/bin/python
+
+import sys
+from telemeta.util.kdenlive.fade import AutoFade
+
+path = sys.argv[-1]
+fade = AutoFade(path)
+data = fade.run()
+f = open(path, 'w')
+f.write(data)
+f.close()
diff --git a/bin/kdenlive/auto_fade_batch.py b/bin/kdenlive/auto_fade_batch.py
new file mode 100755 (executable)
index 0000000..2704776
--- /dev/null
@@ -0,0 +1,20 @@
+
+import os, sys
+from telemeta.util.kdenlive.fade import AutoFade
+
+if __name__ == '__main__':
+    dir = sys.argv[-2]
+    ext = sys.argv[-1]
+
+    for filename in os.listdir(dir):
+        prefix, extension = os.path.splitext(filename)
+        path = dir + os.sep + filename
+        flag = path + '.faded'
+        if ext in extension and not os.path.exists(flag):
+            os.system('cp ' + path + ' ' + path + '.bak')
+            fade = AutoFade(path)
+            data = fade.run()
+            f = open(path, 'w')
+            f.write(data)
+            f.close()
+            os.system('touch ' + flag)
diff --git a/bin/kdenlive/fade.py b/bin/kdenlive/fade.py
new file mode 100644 (file)
index 0000000..c590194
--- /dev/null
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2012-2013 Guillaume Pellerin <yomguy@parisson.com>
+
+# This file is part of TimeSide.
+
+# TimeSide is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+
+# TimeSide is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
+
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+
+
+from telemeta.util.xmltodict2 import *
+
+
+class AutoFade(object):
+    """ Automatically applies a fade in and a fade out trasitions between each segment of a KdenLive session.
+        Each video clip needs to be splitted into one video track and an audio one ("Split audio"),
+        so that an audio fade in/out is also applied.
+
+        MLT files are also supported.
+    """
+
+    def __init__(self, path, audio_frames_out=2, audio_frames_in=1,
+                       video_frames_out=3, video_frames_in=3):
+        self.audio_frames_in = audio_frames_in
+        self.audio_frames_out = audio_frames_out
+        self.video_frames_in = video_frames_in
+        self.video_frames_out = video_frames_out
+        self.path = path
+        self.session = xmltodict(self.path)
+
+    def audio_fade_out(self, frame_out):
+        child = {'attributes': {u'id': u'fadeout',
+        u'in': unicode(int(frame_out)-self.audio_frames_out),
+        u'out': unicode(frame_out)},
+       'children': [{'attributes': {u'name': u'track'},
+         'cdata': '0',
+         'name': 'property'},
+        {'attributes': {u'name': u'window'},
+         'cdata': '75',
+         'name': 'property'},
+        {'attributes': {u'name': u'max_gain'},
+         'cdata': '20dB',
+         'name': 'property'},
+        {'attributes': {u'name': u'mlt_type'},
+         'cdata': 'filter',
+         'name': 'property'},
+        {'attributes': {u'name': u'mlt_service'},
+         'cdata': 'volume',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_id'},
+         'cdata': 'fadeout',
+         'name': 'property'},
+        {'attributes': {u'name': u'tag'},
+         'cdata': 'volume',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_ix'},
+         'cdata': '1',
+         'name': 'property'},
+        {'attributes': {u'name': u'gain'}, 'cdata': '1', 'name': 'property'},
+        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
+       'name': 'filter'}
+
+        return child
+
+    def audio_fade_in(self, frame_in):
+        child = {'attributes': {u'id': u'fadein',
+        u'in': unicode(frame_in),
+        u'out': unicode(int(frame_in)+self.audio_frames_in)},
+       'children': [{'attributes': {u'name': u'track'},
+         'cdata': '0',
+         'name': 'property'},
+        {'attributes': {u'name': u'window'},
+         'cdata': '75',
+         'name': 'property'},
+        {'attributes': {u'name': u'max_gain'},
+         'cdata': '20dB',
+         'name': 'property'},
+        {'attributes': {u'name': u'mlt_type'},
+         'cdata': 'filter',
+         'name': 'property'},
+        {'attributes': {u'name': u'mlt_service'},
+         'cdata': 'volume',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_id'},
+         'cdata': 'fadein',
+         'name': 'property'},
+        {'attributes': {u'name': u'tag'},
+         'cdata': 'volume',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_ix'},
+         'cdata': '1',
+         'name': 'property'},
+        {'attributes': {u'name': u'gain'}, 'cdata': '0', 'name': 'property'},
+        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
+       'name': 'filter'}
+
+        return child
+
+
+    def video_fade_out(self, frame_out):
+        child = {'attributes': {u'id': u'fade_to_black',
+        u'in': unicode(int(frame_out)-self.video_frames_out),
+        u'out': unicode(frame_out)},
+       'children': [{'attributes': {u'name': u'track'},
+         'cdata': '0',
+         'name': 'property'},
+        {'attributes': {u'name': u'start'}, 'cdata': '1', 'name': 'property'},
+        {'attributes': {u'name': u'mlt_type'},
+         'cdata': 'filter',
+         'name': 'property'},
+        {'attributes': {u'name': u'mlt_service'},
+         'cdata': 'brightness',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_id'},
+         'cdata': 'fade_to_black',
+         'name': 'property'},
+        {'attributes': {u'name': u'tag'},
+         'cdata': 'brightness',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_ix'},
+         'cdata': '1',
+         'name': 'property'},
+        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
+       'name': 'filter'}
+
+        return child
+
+
+    def video_fade_in(self, frame_in):
+        child = {'attributes': {u'id': u'fade_from_black',
+        u'in': unicode(frame_in),
+        u'out': unicode(int(frame_in)+self.video_frames_in)},
+       'children': [{'attributes': {u'name': u'track'},
+         'cdata': '0',
+         'name': 'property'},
+        {'attributes': {u'name': u'start'}, 'cdata': '0', 'name': 'property'},
+        {'attributes': {u'name': u'mlt_type'},
+         'cdata': 'filter',
+         'name': 'property'},
+        {'attributes': {u'name': u'mlt_service'},
+         'cdata': 'brightness',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_id'},
+         'cdata': 'fade_from_black',
+         'name': 'property'},
+        {'attributes': {u'name': u'tag'},
+         'cdata': 'brightness',
+         'name': 'property'},
+        {'attributes': {u'name': u'kdenlive_ix'},
+         'cdata': '1',
+         'name': 'property'},
+        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
+       'name': 'filter'}
+
+        return child
+
+    def run(self):
+        audio_count = 0
+        video_count = 0
+        
+        for attr in self.session['children']:
+            if 'playlist' in attr['name'] and 'children' in attr:
+                for att in attr['children']:
+                    if 'producer' in att['attributes'] and not 'children' in att:                        
+                        producer = att['attributes']['producer']
+                        if producer != 'black':
+                        
+                            frame_in = att['attributes']['in']
+                            frame_out = att['attributes']['out']
+
+                            if 'audio' in producer:
+                                if not audio_count % 2:
+                                    att['children'] = [self.audio_fade_out(frame_out)]
+                                else:
+                                    att['children'] = [self.audio_fade_in(frame_in)]
+                                audio_count += 1
+
+
+                            if 'video' in producer:
+                                if not video_count % 2:
+                                    att['children'] = [self.video_fade_out(frame_out)]
+                                else:
+                                    att['children'] = [self.video_fade_in(frame_in)]
+                                video_count += 1
+
+        return dicttoxml(self.session).encode('utf-8')
+
+
diff --git a/bin/kdenlive/mlt_fix_threads.sh b/bin/kdenlive/mlt_fix_threads.sh
new file mode 100755 (executable)
index 0000000..60b0061
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+#threads=$1
+dir=$1
+
+for file in `ls $dir/*.sh`; do
+ perl -pi -e 's/threads=6/threads=4/g' $file
+ perl -pi -e 's/threads=2/threads=4/g' $file
+ perl -pi -e 's/threads=1/threads=4/g' $file
+done
diff --git a/bin/kdenlive/mlt_process_batch.py b/bin/kdenlive/mlt_process_batch.py
new file mode 100755 (executable)
index 0000000..7d346c6
--- /dev/null
@@ -0,0 +1,39 @@
+#!/usr/bin/python
+
+import os, sys
+
+if __name__ == '__main__':
+    root_dir = sys.argv[-1]
+
+    fading = False
+    if '--fading' in sys.argv:
+        fading = True
+
+    for root, dirs, files in os.walk(root_dir):
+        for filename in files:
+            prefix, extension = os.path.splitext(filename)
+            path = root + os.sep + filename
+
+            flag = path + '.processed'
+            if 'sh' in extension and not os.path.exists(flag):
+                if fading:
+                    from telemeta.util.kdenlive.fade import AutoFade
+                    local_files = os.listdir(root)
+                    for local_file in local_files:
+                        local_name, local_ext = os.path.splitext(local_file)
+                        if 'mlt' in local_ext:
+                            local_path = root + os.sep + local_file
+                            local_flag = local_path + '.faded'
+                            if not os.path.exists(local_flag):
+                                print 'fading :        ' + local_path 
+                                os.system('cp ' + local_path + ' ' + local_path + '.bak')
+                                fade = AutoFade(local_path)
+                                data = fade.run()
+                                f = open(local_path, 'w')
+                                f.write(data)
+                                f.close()
+                                os.system('touch ' + local_flag)
+
+                print 'processing :    ' + path
+                os.system('nice -n 19 ' + path)
+                os.system('touch ' + flag)
diff --git a/bin/old/crem_checker.py b/bin/old/crem_checker.py
new file mode 100755 (executable)
index 0000000..9b5088a
--- /dev/null
@@ -0,0 +1,340 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Vérifier que les nouvelles cotes d'item :
+
+- correspondent toutes à la collection décrite par le fichier .csv
+  (le fichier .csv est nommé d'après la nouvelle cote de collection)
+
+- sont uniques dans le fichiers .csv
+
+- ont un des formats suivant :
+    - soit CNRSMH_I_aaaa_nnn_mmm
+    - soit CNRSMH_I_aaaa_nnn_mmm_tt
+    - soit CNRSMH_I_aaaa_nnn_mmm_tt_pp
+    - soit CNRSMH_E_aaaa_nnn_mmm_tt
+    - soit CNRSMH_E_aaaa_nnn_mmm_tt_pp
+
+- correspondent à un fichier .wav (et qu'il n'y a pas de fichiers .wav
+  supplémentaires)
+
+Vérifier que le répertoire est nommé d'après la nouvelle cote de collection
+
+Vérifier que la nouvelle cote de collection a l'un des formats suivant :
+    - soit CNRSMH_I_aaaa_nnn
+    - soit CNRSMH_E_aaaa_nnn_mmm
+
+Vérifier que les fichiers .wav sont lisibles, ont une durée et sont identifés
+comme WAV par audiolab.
+"""
+
+
+import os
+import re
+import sys
+import csv
+import xlrd
+import datetime
+import logging
+import shutil
+
# Legacy CREM collection codes ("anciennes cotes"): each entry pairs a
# human-readable 'format' template with the regex recognising it.
# NOTE(review): this list is not referenced by the visible checker code
# (only ITEM_NEW_PATTERN and COLLECTION_PATTERN are used below); it appears
# to be kept for reference by older import scripts — confirm before removing.
COLLECTION_OLD_PATTERN = [
        { 'format': 'BM.aaa.nnn.mmm',           'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'BM.aaaa.nnn.mmm/pp',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaaa.nnn.mmm',          'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'BM.aaaa.nnn.mmm/',         'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/$'},
        { 'format': 'BM.aaaa.nnn.mmm/ppp',      'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
        { 'format': 'BM.aaaa.nnn.mm/pp',        'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{2})/[0-9]{2}$'},
        { 'format': 'BM.aaaa.nnn',              'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})$'},
        { 'format': 'BM.aaa.nnn.mmm/pp',        'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaa.nnn FANTOME',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3}) FANTOME$'},
        { 'format': 'BM.aaa.nnn',               'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'BM.aaa.nnnBISoo/pp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})BIS([0-9]{2})/[0-9]{2}$'},
        { 'format': 'BM.aaa.nnn.mmm.ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})\.[0-9]{3}$'},
        { 'format': 'BM.aaa.nnn.mmm/ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
        { 'format': 'BM.aaa.nnn/pp',            'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaa.nnn-BIS.ooo/pp',    'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})-BIS\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'BM.aaaa.nnn.mmm/NN',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/NN$'},
        { 'format': 'BM.aaa.nnn.mmm/pp-DEPOT',  'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}-DEPOT$'},
        { 'format': 'BM.aaa.nnn.mmm-o>p',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]>[0-9]$'},
        { 'format': 'CY.aaaa.nnn',              'regex': r'^(CY)\.([0-9]{4})\.([0-9]{3})$'},
        { 'format': 'DI.aaaa.nnn.mmm',          'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'DI.aaaa.nnn.mmm/pp',       'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'DI.aaa.nnn.mmm',           'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
        { 'format': 'DI.aaa.nnn.mmm/pp',        'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
        { 'format': 'DI.aaa.nnn.mmm-o/p',       'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]/[0-9]$'},
        { 'format': 'FANTOME 2*',               'regex': r'FANTOME 2\*$'},

        ## yomguy
        # NOTE(review): 'format' reads BM.aaaa.nnn.mm but the regex requires
        # three digits in the last group (identical to BM.aaaa.nnn.mmm above)
        # — confirm which was intended.
        { 'format': 'BM.aaaa.nnn.mm',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
        #{ 'format': 'BM.aaaa.nnn.mmm/pp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
        #{ 'format': 'BM.aaaa.nnn.mmm/ppp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
        #{ 'format': 'BM.aaaa.nnn.mmm:ii-jj',    'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3}):([0-9]{2})\-([0-9]{2})$'},
        ]

# Accepted formats for *new* item codes (see module docstring).
ITEM_NEW_PATTERN = [
        { 'format': 'CNRSMH_I_aaaa_nnn_mmm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})_([0-9]{2})$'},
        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2,3})_([0-9]{2})$'},

        # yomguy
        { 'format': 'CNRSMH_I_aaaa_nnn_mm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{2})$'},
        ]

# Accepted formats for *new* collection codes (directory names).
COLLECTION_PATTERN = [
        { 'format': 'CNRSMH_I_aaaa_nnn',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})$'},
        { 'format': 'CNRSMH_E_aaaa_nnn_mmm',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
        ]
+
+
def check_name(patterns, name):
    """Match `name` against a list of pattern dicts.

    Each entry of `patterns` is a dict carrying a 'regex' key.  Returns the
    re.MatchObject of the first pattern that matches, None when every
    pattern was tried without success, or False for an empty pattern list.
    """
    result = False
    for entry in patterns:
        result = re.match(entry['regex'], name)
        if result:
            return result
    return result
+
+
class Logger:
    """File-backed logger with a fixed '%(asctime)s %(levelname)s %(message)s' format.

    NOTE(review): messages are passed through .decode('utf8'), so callers are
    expected to hand in UTF-8 byte strings (Python 2 semantics).
    """

    def __init__(self, file):
        # 'file' is the path of the log file to append to.
        self.logger = logging.getLogger('myapp')
        self.hdlr = logging.FileHandler(file)
        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        self.hdlr.setFormatter(self.formatter)
        self.logger.addHandler(self.hdlr)
        self.logger.setLevel(logging.INFO)

    def write_info(self, prefix, message):
        # Log '<prefix> : <message>' at INFO level.
        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))

    def write_error(self, prefix, message):
        # Log '<prefix> : <message>' at ERROR level.
        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
class CremCollection:
    """One collection directory on disk: lists its XLS and WAV files."""

    def __init__(self, dir, logger):
        # 'dir' is the absolute path of the collection directory.
        self.dir = dir
        self.dir_name = self.dir.split(os.sep)[-1]
        self.file_list = os.listdir(self.dir)
        self.logger = logger

    def xls_list(self):
        """Return non-hidden *.xls / *.XLS files of the directory (also printed)."""
        file_list = []
        for file in self.file_list:
            filename = os.path.basename(file)
            ext = os.path.splitext(file)[1]
            if not '.' == filename[0] and (ext == '.xls' or ext == '.XLS'):
                file_list.append(file)
        print file_list
        return file_list

    def wav_list(self):
        """Return non-hidden *.wav / *.WAV files; log a warning for hidden files."""
        list = []
        for file in self.file_list:
            filename = os.path.basename(file)
            ext = os.path.splitext(file)[1]
            if not '.' == filename[0] and (ext == '.wav' or ext == '.WAV'):
                list.append(file)
            elif '.' == filename[0]:
                self.logger.write_error(file, 'Warning : fichier caché présent !')
        return list
+
+
class CremCSV:
    """Semicolon-delimited CSV output file.

    Exposes the csv.writer as `self.csv`; call close() when done so the
    underlying file handle is flushed and released.
    """

    def __init__(self, file):
        # Keep the raw handle so close() can release it explicitly.
        self.csv_file = open(file, 'w')
        self.csv = csv.writer(self.csv_file, delimiter=';')

    def close(self):
        """Close the underlying CSV file."""
        self.csv_file.close()
+
class CremXLS:
    """Reads the collection XLS: column 0 = old item refs, column 1 = new refs.

    Data starts at row `first_row`; leading rows whose new ref does not
    contain 'CNRS' are stripped as header noise.
    """

    def __init__(self, file):
        self.first_row = 8        # first data row in the sheet (0-based)
        self.original_col = 0     # column of the old ("ancienne") references
        self.new_col = 1          # column of the new CNRSMH references
        self.book = xlrd.open_workbook(file)
        self.sheet = self.book.sheet_by_index(0)
        # NOTE: these assignments rebind the method names to the result
        # lists — original_refs/new_refs are lists from here on.
        self.original_refs = self.original_refs()
        self.new_refs = self.new_refs()
        #print len(self.new_refs)
        # Drop leading non-data rows: stop at the first new ref containing
        # 'CNRS' (or as soon as either list is exhausted).
        while True:
            if len(self.original_refs) == 0 or len(self.new_refs) == 0:
                break
            else:
                if not 'CNRS' in self.new_refs[0].encode('utf8') \
                 and not  self.original_refs[0].encode('utf8') == '':
                    self.original_refs = self.original_refs[1:]
                    self.new_refs = self.new_refs[1:]
                else:
                    break

        # Number of items the sheet claims to describe.
        self.size = max(len(self.new_refs), len(self.original_refs))

    def original_refs(self):
        """Return the text cells (ctype == 1) of the old-reference column."""
        col = self.sheet.col(self.original_col)
        list = []
        for cell in col[self.first_row:]:
            if cell.ctype == 1:
                list.append(cell.value)
        return list

    def new_refs(self):
        """Return the text cells (ctype == 1) of the new-reference column."""
        col = self.sheet.col(self.new_col)
        list = []
        for cell in col[self.first_row:]:
            if cell.ctype == 1:
                list.append(cell.value)
        return list
+
+
class CremItemFile:
    """Holds the path of one media file and (optionally) its audio properties.

    NOTE(review): properties() reads self.audio_file, which is never assigned
    anywhere in this class — calling it as-is raises AttributeError.
    Presumably an audiolab/sndfile object was meant to be opened from
    self.media first (see module docstring); confirm before using.
    """

    def __init__(self):
        self.media = ''  # path of the .wav file, set via set_media()

    def set_media(self, media):
        self.media = media

    def properties(self):
        # Extract basic audio properties from the (missing) audio_file handle.
        self.frames = self.audio_file.get_nframes()
        self.samplerate = self.audio_file.get_samplerate()
        self.channels = self.audio_file.get_channels()
        self.format = self.audio_file.get_file_format()
        self.encoding = self.audio_file.get_encoding()
+
+
class CremCheck:
    """Walks a root directory of collection folders and validates each one.

    For every collection directory: checks the directory name, locates the
    single XLS file, cross-checks item references against the WAV files and
    writes the valid (old, new) reference pairs to a per-collection CSV.
    All findings go to the log file.
    """

    def __init__(self, root_dir, log_file):
        self.root_dir = root_dir
        self.logger = Logger(log_file)
        # Keep only non-hidden entries as candidate collection directories.
        dir_list = os.listdir(self.root_dir)
        list = []
        for dir in dir_list:
           if not dir[0] == '.':
               list.append(dir)
        self.dir_list = list

    def check_new_refs(self):
        # NOTE(review): dead/broken code — ITEM_PATTERN is not defined in this
        # module (ITEM_NEW_PATTERN presumably intended) and self.new_refs is
        # never set on this class; calling this raises NameError/AttributeError.
        for name in self.new_refs:
            return check_name(ITEM_PATTERN, name)

    def check(self):
        """Run all validations over every collection directory."""
        for dir in self.dir_list:
            collection = CremCollection(self.root_dir + dir, self.logger)
            msg = '************************ ' + collection.dir_name + ' ******************************'
            self.logger.write_info(collection.dir, msg[:70])

            xls_list = collection.xls_list()
            wav_list = collection.wav_list()

            # The directory must be named after the new collection code and
            # contain exactly one XLS file; otherwise skip it entirely.
            if not check_name(COLLECTION_PATTERN, dir):
                self.logger.write_error(collection.dir, 'Le dossier de la collection est mal nommé -> SORTIE')
            elif len(xls_list) == 0:
                self.logger.write_error(collection.dir, 'PAS de fichier XLS dans le dossier collection -> SORTIE')
            elif len(xls_list) > 1:
                self.logger.write_error(collection.dir, 'Plusieurs fichiers XLS dans le dossier collection -> SORTIE')

            else:
                xls = CremXLS(self.root_dir + os.sep + dir + os.sep + xls_list[0])
                self.logger.write_info(collection.dir, 'XLS : ' + xls_list[0] + ' - Feuille : ' + xls.sheet.name.encode('utf8'))
                self.logger.write_info(collection.dir, 'Nombre d\'items détectés : ' + str(xls.size))
                csv_file = CremCSV(self.root_dir + dir + os.sep + collection.dir_name + '.csv')

                if len(wav_list) != xls.size:
                    self.logger.write_error(collection.dir, \
                    'Le nombre de références du fichier XLS (' + str(xls.size) + ') diffère du nombre de fichiers (' + str(len(wav_list)) + ')')

                temp_list = []
                item_file = CremItemFile()

                # Validate every (old ref, new ref) row of the spreadsheet.
                for i in range(0,xls.size):
                    error = False

                    try:
                        item_old = xls.original_refs[i]
                        #self.logger.write_error(collection.dir, item_old)
                    except:
                        item_old = ''
                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item est inexistante'
                        self.logger.write_error(collection.dir, msg)
                        error = True
                        continue

                    try:
                        item = xls.new_refs[i]
                        #self.logger.write_error(collection.dir, item)
                    except:
                        item = ''
                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item est inexistante'
                        self.logger.write_error(collection.dir, msg)
                        error = True
                        continue

                    # New references must be unique within the sheet.
                    if not item in temp_list:
                        temp_list.append(item)
                    else:
                        msg =  'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' est multiple'
                        self.logger.write_error(collection.dir, msg)
                        error = True

                    #if not check_name(ITEM_OLD_PATTERN, item_old):
                        #msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item ' + item_old.encode('utf8') + ' est mal formatée'
                        #self.logger.write_error(collection.dir, msg)

                    # New reference must match one of the accepted formats.
                    if not check_name(ITEM_NEW_PATTERN, item):
                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item ' + item.encode('utf8') + ' est mal formatée'
                        self.logger.write_error(collection.dir, msg)
                        error = True

                    # Item reference must embed the collection code.
                    if not collection.dir_name in item:
                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' ne correspond pas Ã  celle de la collection'
                        self.logger.write_error(collection.dir, msg)
                        error = True

                    # Each reference must have a matching WAV file.
                    name_wav = item.encode('utf8') + '.wav'
                    if not name_wav in wav_list:
                        self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'existe pas')
                    else:
                        item_file.set_media(collection.dir + os.sep + name_wav)
                        #if not item_file.is_wav():
                        #    self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'est pas valide')
                        #    error = True

                    # Only rows that passed every check are written to the CSV.
                    if not error:
                        csv_file.csv.writerow([xls.original_refs[i], xls.new_refs[i]])

                csv_file.close()

                # Conversely, every WAV filename must be a well-formed item ref.
                for filename in wav_list:
                    if not check_name(ITEM_NEW_PATTERN, os.path.splitext(filename)[0]):
                        self.logger.write_error(collection.dir, 'Le nom du fichier ' + str(os.path.splitext(filename)[0]) + ' est mal formaté')

            msg = '********************************************************************************'
            self.logger.write_info(collection.dir, msg[:70])
+
+
def main():
    """CLI entry point: run the checker, then rotate the log file.

    The two last CLI arguments are <root_dir> <log_file>.  The run writes
    into '<log_file>.tmp'; afterwards that file is archived with a
    timestamp suffix and moved into place as the current log.
    """
    log_file = sys.argv[-1]
    root_dir = sys.argv[-2]
    tmp_log = log_file + '.tmp'

    checker = CremCheck(root_dir, tmp_log)
    checker.check()

    stamp = datetime.datetime.now().strftime("%x-%X").replace('/', '_')
    shutil.copy(tmp_log, log_file + '-' + stamp + '.log')
    shutil.move(tmp_log, log_file)

if __name__ == '__main__':
    main()
+
diff --git a/bin/old/process-waveform-cgi.py b/bin/old/process-waveform-cgi.py
new file mode 100755 (executable)
index 0000000..317878b
--- /dev/null
@@ -0,0 +1,222 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2009-2010 Guillaume Pellerin <yomguy@parisson.com>
+
+# This file is part of TimeSide.
+
+# TimeSide is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+
+# TimeSide is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
+
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+
+# for python2.5
+
+version = '0.5'
+
+
+import os
+import sys
+import time
+import shutil
+import datetime
+import timeside
+
+# soon with python2.6
+#from multiprocessing import Process
+
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+import cgi
+fs = cgi.FieldStorage()
+
+
# Site-specific paths: source music archive, Django project root, log file.
orig_media_dir = '/mnt/awdiomusic/musicbase'
project_dir = '/mnt/awdio'
log_file = project_dir + '/logs/process.log'
# Make the site's Telemeta app importable before settings are loaded.
sys.path.append('/home/awdio/apps/telemeta-awdio')
+
+
class GrapherScheme:
    """Static rendering configuration for the waveform/spectrogram grapher."""

    def __init__(self):
        self.color = 255
        # (R, G, B) tuples: one grey-scale channel triple for the waveform,
        # a gradient ramp for the spectrogram.
        self.color_scheme = {
            'waveform': [
                (self.color, self.color, self.color)
#                (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,0,0)
            ],
            'spectrogram': [
                (0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2),
                (90,180,100), (224,224,44), (255,60,30), (255,255,255)
            ],
        }

        self.id = 'waveform_awdio'  # grapher id
        self.width = 1800           # image width in pixels
        self.height = 233           # image height in pixels
        self.bg_color = None        # background color (None = default)

        # By default existing image files are not overwritten.
        self.force = False

        # Number of processing threads.
        # FIXME: memory leak for > 1 !
        self.threads = 1
+
+      
+class TelemetaPreprocessImport(object):
+
+    def __init__(self, root_dir, dest_dir, log_file):
+       from telemeta.cache import TelemetaCache as Cache
+       from telemeta.util.logger import Logger
+       self.media_item_dir = 'items'
+        self.root_dir = root_dir + 'items'
+        self.dest_dir = dest_dir
+        self.threads = 1
+        self.logger = Logger(log_file)
+        self.counter = 0
+        self.force = 0
+        self.cache = Cache(self.dest_dir)
+
+        self.scheme = GrapherScheme()
+        self.width = self.scheme.width
+        self.height = self.scheme.height
+        self.bg_color = self.scheme.bg_color
+        self.color_scheme = self.scheme.color_scheme
+        self.force = self.scheme.force
+        self.threads = self.scheme.threads
+        self.logger = Logger(log_file)
+        self.counter = 0
+        self.collection_name = 'awdio'
+        self.collection = self.set_collection(self.collection_name)
+        
+        self.analyzers = timeside.core.processors(timeside.api.IAnalyzer)
+        self.grapher = timeside.grapher.WaveformAwdio(width=self.width, 
+                                                         height=self.height, 
+                                                         bg_color=self.bg_color, 
+                                                         color_scheme=self.color_scheme)
+        
+
+    def set_collection(self, collection_name):
+        import telemeta.models
+        collections = telemeta.models.media.MediaCollection.objects.filter(code=collection_name)
+        if not collections:
+            c = telemeta.models.media.MediaCollection(code=collection_name)
+            c.title = collection_name
+            c.save()
+            msg = 'added'
+            self.logger.logger.info(collection_name, msg)
+            collection = c
+        else:
+            collection = collections[0]
+        return collection
+
+    def process(self):
+       import telemeta.models
+       keys = fs.keys()
+       if keys[0] == 'file':
+           filename = fs['file'].value
+           media_orig = orig_media_dir + os.sep + filename
+           media = self.root_dir + os.sep + filename
+           
+           if not os.path.exists(media):
+               shutil.copy(media_orig, media)
+               os.system('chmod 644 ' + media)
+            
+            name, ext = os.path.splitext(filename)
+            size = str(self.width) + '_' + str(self.height)
+            image_name = name + '.' + self.scheme.id + '.' + size + '.png'
+            image = self.dest_dir + os.sep + image_name
+            xml = name + '.xml'
+            
+            if not self.cache.exists(image_name) or not self.cache.exists(xml):
+                mess = 'Processing ' + media
+                self.logger.logger.info(mess)
+           
+               print "Content-type: text/plain\n"
+               print mess
+               decoder  = timeside.decoder.FileDecoder(media)
+               pipe = decoder | self.grapher
+               analyzers = []
+               analyzers_sub = []
+               for analyzer in self.analyzers:
+                   subpipe = analyzer()
+                   analyzers_sub.append(subpipe)
+                   pipe = pipe | subpipe
+               pipe.run()
+               
+               mess = 'Rendering ' + image
+               self.logger.logger.info(mess)
+               self.grapher.render(output=image)
+               
+               mess = 'Frames / Pixel = ' + str(self.grapher.graph.samples_per_pixel)
+               self.logger.logger.info(mess)
+               
+               for analyzer in analyzers_sub:
+                   value = analyzer.result()
+                   if analyzer.id() == 'duration':
+                       value = datetime.timedelta(0,value)
+                   analyzers.append({'name':analyzer.name(),
+                           'id':analyzer.id(),
+                           'unit':analyzer.unit(),
+                           'value':str(value)})
+               
+               self.cache.write_analyzer_xml(analyzers, xml)
+               
+               item = telemeta.models.media.MediaItem.objects.filter(code=name)
+                           
+               if not item:
+                   item = telemeta.models.media.MediaItem(collection=self.collection, code=name)
+                   item.title = name
+                   item.file = self.media_item_dir + os.sep + filename
+                   item.save()
+                   msg = 'added item : ' + filename
+                   self.logger.logger.info(self.collection_name, msg)
+
+               pipe = 0
+               decoder = 0
+               
+               print "OK"
+               
+               #except:
+                   #pipe = 0
+                   #decoder = 0
+                   #mess = 'Could NOT process : ' + media
+                   #self.logger.logger.error(mess)
+                   #print mess
+                   
+           else:
+               mess = "Nothing to do with file : " + media
+               self.logger.logger.info(mess)
+               print "Content-type: text/plain\n"
+               print mess
+       
+       else:
+           print "Content-type: text/plain\n"
+           print "No file given !"
+       
+
if __name__ == '__main__':
    # Bootstrap Django with the site's settings, then run the CGI import.
    sys.path.append(project_dir)
    import settings
    setup_environ(settings)
    media_dir = settings.MEDIA_ROOT
    data_dir = settings.TELEMETA_DATA_CACHE_DIR
    t = TelemetaPreprocessImport(media_dir, data_dir, log_file)
    t.process()
diff --git a/bin/old/telemeta-backup.py b/bin/old/telemeta-backup.py
new file mode 100755 (executable)
index 0000000..d93a83f
--- /dev/null
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2007 Samalyse SARL
+
+# This file is part of Telemeta.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Author: Olivier Guilyardi <olivier@samalyse.com>
+
+import os
+import sys
+import time
+from django.core.management import setup_environ
+
def print_usage(toolname):
    # Print CLI usage to stdout (note: stdout, not stderr).
    print "Usage: " + toolname + " <project_dir> <backup_dir>"
    print "  project_dir: the directory of the Django project which hosts Telemeta"
    print "  backup_dir: the destination backup folder (must exist)"
+
def write_readme(dest_dir, coll_num):
    """Write a README into dest_dir recording backup date and collection count."""
    stamp = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
    handle = open(dest_dir + "/" + "README", "w")
    handle.write("Telemeta Backup\n\n")
    handle.write("- date: " + stamp + "\n")
    handle.write("- number of collections: " + str(coll_num) + "\n\n")
    handle.close()
+
def backup(dest_dir):
    """Serialize every MediaCollection into dest_dir, 100 per sub-folder.

    Writes a README first, then creates 'collections-1-100',
    'collections-101-200', ... sub-directories and serializes each
    collection into the current one.  Progress is printed to stdout.
    """
    from telemeta.models import MediaCollection
    from telemeta.backup import CollectionSerializer

    collections = MediaCollection.objects.order_by('id')
    count = collections.count()

    print "Writing README file..",
    write_readme(dest_dir, count)
    print "Done."

    i = 0
    for collection in collections:
        # Start a new batch directory every 100 collections.
        if i % 100 == 0:
            set_dir = dest_dir + ("/collections-%d-%d" % (i+1, i+100))
            os.mkdir(set_dir)
        i += 1
        print "Processing collection %d/%d (%d%%) with id: %s.. " \
            % (i, count, i*100/count, collection.id),
        sys.stdout.flush()
        serializer = CollectionSerializer(collection)
        serializer.backup(set_dir)
        print "Done"
+
def run():
    """Parse <project_dir> <backup_dir>, bootstrap Django, run the backup."""
    if len(sys.argv) != 3:
        print_usage(os.path.basename(sys.argv[0]))
        sys.exit(1)
    else:
        project_dir = sys.argv[1]
        backup_dir = sys.argv[2]
        # The project dir must be importable so 'import settings' works.
        sys.path.append(project_dir)
        import settings
        setup_environ(settings)
        backup(backup_dir)
diff --git a/bin/old/telemeta-crem-import-alt_ids.py b/bin/old/telemeta-crem-import-alt_ids.py
new file mode 100755 (executable)
index 0000000..84c673d
--- /dev/null
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2011 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import sys
+import xlrd
+import logging
+import datetime
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
class Logger:
    """File-backed logger (duplicate of the crem_checker Logger).

    NOTE(review): messages go through .decode('utf8'), so callers must pass
    UTF-8 byte strings (Python 2 semantics).
    """

    def __init__(self, file):
        # 'file' is the path of the log file to append to.
        self.logger = logging.getLogger('myapp')
        self.hdlr = logging.FileHandler(file)
        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        self.hdlr.setFormatter(self.formatter)
        self.logger.addHandler(self.hdlr)
        self.logger.setLevel(logging.INFO)

    def write_info(self, prefix, message):
        # Log '<prefix> : <message>' at INFO level.
        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))

    def write_error(self, prefix, message):
        # Log '<prefix> : <message>' at ERROR level.
        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
class TelemetaAltIdsImport:
    """Imports collection alt_ids from an XLS sheet into Telemeta.

    Sheet layout: column 0 = collection old_code, columns 1..n = alternate
    ids (text cells only).  All alt ids of a row are joined with spaces and
    stored on the matching MediaCollection.
    """

    def __init__(self, xls_file, log_file):
        self.logger = Logger(log_file)
        self.xls = xls_file
        self.row = 0  # current sheet row; advanced before each read

    def alt_ids_import(self):
        """Walk the sheet rows and save alt_ids on each matching collection."""
        from telemeta.models import MediaCollection
        self.book = xlrd.open_workbook(self.xls)
        self.sheet = self.book.sheet_by_index(0)
        self.length = len(self.sheet.col(0))-1
        
        # NOTE(review): the break test runs after the row is fetched but
        # before it is processed, so the last data row appears to be read
        # and then discarded — confirm this off-by-one is intended.
        while True:
            ids = []
            self.row += 1
            row = self.sheet.row(self.row)
            if self.row == self.length:
                break
            collection_id = row[0].value
            cell_alt_id = row[1]
            if cell_alt_id.ctype == 1:
                # Collect every text cell (ctype 1) after the code column.
                for i in range(1,len(row)):
                    cell_alt_id = row[i]
                    if cell_alt_id.ctype == 1:
                        ids.append(cell_alt_id.value)
                alt_ids = ' '.join(ids)
                # NOTE(review): bare except hides every failure mode as
                # 'No collection found', including database errors.
                try:
                    collection = MediaCollection.objects.get(old_code=collection_id)
                    collection.alt_ids = alt_ids
                    collection.save()
                    print self.row, collection_id, alt_ids
                except:
                    msg = 'No collection found for this id'
                    self.logger.write_error(collection_id, msg)
                    continue
+            
+                
+def print_usage(tool_name):
+    print "Usage: "+tool_name+" <project_dir> <xls_file> <log_file>"
+    print "  project_dir: the directory of the Django project which hosts Telemeta"
+    print "  xls_file: the excel file containing all collection alt_ids"
+
def run():
    """Parse CLI args, bootstrap Django and run the alt_ids import.

    Expects the last three arguments to be <project_dir> <xls_file> <log_file>.
    """
    # Fix: the program name plus THREE arguments are required (argv length 4).
    # The original check (len < 3) let a two-argument call through, after
    # which argv[-3] picked up the script name as project_dir and the paths
    # were silently misassigned.
    if len(sys.argv) < 4:
        print_usage(os.path.basename(sys.argv[0]))
        sys.exit(1)
    else:
        project_dir = sys.argv[-3]
        xls_file = sys.argv[-2]
        log_file = sys.argv[-1]
        # The project dir must be importable so 'import settings' works.
        sys.path.append(project_dir)
        import settings
        setup_environ(settings)
        t = TelemetaAltIdsImport(xls_file, log_file)
        t.alt_ids_import()
diff --git a/bin/old/telemeta-crem-import-test.py b/bin/old/telemeta-crem-import-test.py
new file mode 100755 (executable)
index 0000000..021e9a2
--- /dev/null
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2010 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import sys
+import csv
+import logging
+import datetime
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
+
+class Logger:
+    # Thin wrapper around the stdlib logging module: a single
+    # FileHandler at INFO level with a timestamped format.
+
+    def __init__(self, file):
+        # 'file' is the path of the log file to append to.
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def info(self, prefix, message):
+        # Python 2: 'message' is a utf-8 byte string, decoded before logging.
+        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
+
+    def error(self, prefix, message):
+        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
+class TelemetaWavImport:
+    # Test/dry-run variant of the WAV importer: walks source_dir for
+    # collection folders whose path matches 'pattern', registers missing
+    # MediaCollection/MediaItem rows and logs each decision. NOTE: the
+    # actual audio-file copy is commented out in write_file(), so this
+    # variant records revisions without writing media files.
+
+    def __init__(self, source_dir, log_file, pattern, domain):
+        # Deferred import: Django settings must already be configured
+        # (see run() at the bottom of this file).
+        from django.contrib.auth.models import User
+        self.logger = Logger(log_file)
+        self.source_dir = source_dir
+        self.collections = os.listdir(self.source_dir)
+        self.pattern = pattern
+        # Revisions are attributed to the 'admin' account; [0] raises
+        # IndexError if that user does not exist.
+        self.user = User.objects.filter(username='admin')[0]
+        self.domain = domain
+
+    def write_file(self, item, wav_file, overwrite=False):
+        # Attach wav_file to 'item' unless it already has a file (and
+        # overwrite is False). The copy itself is disabled below: only
+        # item.save() and the revision are performed in this test script.
+        filename = wav_file.split(os.sep)[-1]
+        if os.path.exists(wav_file):
+            if not item.file or overwrite:
+#                f = open(wav_file, 'r')
+#                file_content = ContentFile(f.read())
+#                item.file.save(filename, file_content)
+#                f.close()
+                item.save()
+                item.set_revision(self.user)
+            else:
+                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
+                self.logger.error('item', msg)
+        else:
+            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
+            self.logger.error('item', msg)
+            
+    def wav_import(self):
+        # Two passes:
+        #   1) validate/create the collection rows for folders matching
+        #      self.pattern (hidden paths containing '/.' are skipped);
+        #   2) per collection, match WAV files to items by code or, via
+        #      the optional CSV, by old_code ("Cas 1/2/3a/3b" below).
+        from telemeta.models import MediaItem,  MediaCollection
+        
+        collections = []
+        for collection in self.collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_files = os.listdir(collection_dir)
+            
+            
+            if not '/.' in collection_dir and self.pattern in collection_dir:
+                collection_name = collection.split(os.sep)[-1]
+                collections.append(collection_name)
+                c = MediaCollection.objects.filter(code=collection_name)
+                
+                if not c and collection + '.csv' in collection_files:
+                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
+                    self.logger.error(collection, msg)
+                    sys.exit(msg)
+                elif not c:
+                    msg = 'collection NON présente dans la base de données, CREATION '
+                    self.logger.info(collection, msg)
+                    c = MediaCollection(code=collection_name)
+                    c.save()
+                    c.set_revision(self.user)
+                else:
+                    msg = 'collection présente dans la base de données, SELECTION'
+                    self.logger.info(collection, msg)
+                    
+        for collection in collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_name = collection
+            collection_files = os.listdir(collection_dir)
+            msg = '************************ ' + collection + ' ******************************'
+            self.logger.info(collection, msg[:70])
+            overwrite = True
+            csv_file = ''
+            rows = {}
+            
+            # The optional '<collection>.csv' (';'-separated) maps each
+            # item code (column 2) to its old reference (column 1):
+            # rows[code] = old_ref.
+            if collection + '.csv' in collection_files:
+                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
+                csv_data = csv.reader(open(csv_file), delimiter=';')
+                for row in csv_data:
+                    rows[row[1].strip()] = row[0].strip()
+                msg = collection + ' import du fichier CSV de la collection'
+                self.logger.info(collection, msg[:70])
+            else:
+                msg = collection + ' pas de fichier CSV dans la collection'
+                self.logger.info(collection, msg[:70])
+            
+            c = MediaCollection.objects.filter(code=collection_name)
+            if not c:
+                c = MediaCollection(code=collection_name)
+                c.save()
+                msg = ' collection NON présente dans la BDD, CREATION '
+                self.logger.info(c.code, msg)
+            else:
+                c = c[0]
+                msg = ' id = '+str(c.id)
+                self.logger.info(c.code, msg)
+            
+            audio_files = []
+            for file in collection_files:
+                ext = ['WAV', 'wav']
+                if file.split('.')[-1] in ext:
+                    audio_files.append(file)
+            
+            audio_files.sort()
+            nb_items = c.items.count()
+            counter = 0
+            
+            for file in audio_files:
+                code = file.split('.')[0]
+                wav_file = self.source_dir + os.sep + collection + os.sep + file
+                
+                # Cas 1/2: at least as many DB items as files -- match by
+                # code, falling back to old_code from the CSV mapping.
+                if len(audio_files) <= nb_items:
+                    items = MediaItem.objects.filter(code=code)
+                    
+                    old_ref = ''
+                    if code in rows and not items:
+                        old_ref = rows[code]
+                        items = MediaItem.objects.filter(old_code=old_ref)
+                        
+                    if items:
+                        item = items[0]
+                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
+                        self.logger.info('item', msg)
+                        item.code = code
+                        item.save()
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+                    
+                    self.write_file(item, wav_file, overwrite)
+                    
+                # Cas 3a: one item already in DB but several files --
+                # keep the first file for the existing item, create the rest.
+                elif nb_items == 1 and len(audio_files) > 1:
+                    if counter == 0:
+                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
+                        self.logger.info('item', msg)
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+                        self.write_file(item, wav_file, overwrite)
+                
+                # Cas 3b: ambiguous (several items, more files) -- log only.
+                elif nb_items > 1 and nb_items < len(audio_files):
+                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
+                    self.logger.info('item', msg)
+
+                counter += 1
+        
+        msg = 'Liste des URLs des collections importées :'
+        self.logger.info('INFO', msg)
+        for collection in collections:
+            msg = 'http://'+self.domain+'/collections/'+collection
+            self.logger.info(collection, msg)
+            
+        
+# Print CLI usage for the WAV import tool to stdout.
+def print_usage(tool_name):
+    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
+    print "  project_dir: the directory of the Django project which hosts Telemeta"
+    print "  source_dir: the directory containing the wav files to include"
+    print "  pattern: a pattern to match the collection names"
+    print "  log_file: a log file to write logs"
+    print "  domain: root domain for collections"
+
+def run():
+    if len(sys.argv) < 3:
+        print_usage(os.path.basename(sys.argv[0]))
+        sys.exit(1)
+    else:
+        project_dir = sys.argv[-5]
+        source_dir = sys.argv[-4]
+        pattern = sys.argv[-3]
+        log_file = sys.argv[-2]
+        url = sys.argv[-1]
+        sys.path.append(project_dir)
+        import settings
+        setup_environ(settings)
+        t = TelemetaWavImport(source_dir, log_file, pattern, url)
+        t.wav_import()
+
+# Run only when executed as a script, not when imported as a module.
+if __name__ == '__main__':
+    run()
diff --git a/bin/old/telemeta-crem-import.py b/bin/old/telemeta-crem-import.py
new file mode 100755 (executable)
index 0000000..dcdf5c1
--- /dev/null
@@ -0,0 +1,211 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2010 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import sys
+import csv
+import logging
+import datetime
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
+
+class Logger:
+    # Thin wrapper around the stdlib logging module: a single
+    # FileHandler at INFO level with a timestamped format.
+
+    def __init__(self, file):
+        # 'file' is the path of the log file to append to.
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def info(self, prefix, message):
+        # Python 2: 'message' is a utf-8 byte string, decoded before logging.
+        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
+
+    def error(self, prefix, message):
+        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
+class TelemetaWavImport:
+    # Production WAV importer (the '-test' sibling script disables the
+    # file copy): walks source_dir for collection folders whose path
+    # matches 'pattern', registers missing MediaCollection/MediaItem
+    # rows and attaches each WAV file to its item.
+
+    def __init__(self, source_dir, log_file, pattern, domain):
+        # Deferred import: Django settings must already be configured
+        # (see run() at the bottom of this file).
+        from django.contrib.auth.models import User
+        self.logger = Logger(log_file)
+        self.source_dir = source_dir
+        self.collections = os.listdir(self.source_dir)
+        self.pattern = pattern
+        # Revisions are attributed to the 'admin' account; [0] raises
+        # IndexError if that user does not exist.
+        self.user = User.objects.filter(username='admin')[0]
+        self.domain = domain
+
+    def write_file(self, item, wav_file, overwrite=False):
+        # Copy wav_file into the item's file field unless the item
+        # already has a file (and overwrite is False); then save and
+        # record a revision. NB: reads the whole file into memory.
+        filename = wav_file.split(os.sep)[-1]
+        if os.path.exists(wav_file):
+            if not item.file or overwrite:
+                f = open(wav_file, 'r')
+                file_content = ContentFile(f.read())
+                item.file.save(filename, file_content)
+                f.close()
+                item.save()
+                item.set_revision(self.user)
+            else:
+                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
+                self.logger.error('item', msg)
+        else:
+            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
+            self.logger.error('item', msg)
+
+    def wav_import(self):
+        # Two passes:
+        #   1) validate/create the collection rows for folders matching
+        #      self.pattern (hidden paths containing '/.' are skipped);
+        #   2) per collection, match WAV files to items by code or, via
+        #      the optional CSV, by old_code ("Cas 1/2/3a/3b" below).
+        from telemeta.models import MediaItem,  MediaCollection
+
+        collections = []
+        for collection in self.collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_files = os.listdir(collection_dir)
+
+
+            if not '/.' in collection_dir and self.pattern in collection_dir:
+                collection_name = collection.split(os.sep)[-1]
+                collections.append(collection_name)
+                c = MediaCollection.objects.filter(code=collection_name)
+
+                if not c and collection + '.csv' in collection_files:
+                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
+                    self.logger.error(collection, msg)
+                    sys.exit(msg)
+                elif not c:
+                    msg = 'collection NON présente dans la base de données, CREATION '
+                    self.logger.info(collection, msg)
+                    c = MediaCollection(code=collection_name, title=collection_name)
+                    c.save()
+                    c.set_revision(self.user)
+                else:
+                    msg = 'collection présente dans la base de données, SELECTION'
+                    self.logger.info(collection, msg)
+
+        for collection in collections:
+            collection_dir = self.source_dir + os.sep + collection
+            collection_name = collection
+            collection_files = os.listdir(collection_dir)
+            msg = '************************ ' + collection + ' ******************************'
+            self.logger.info(collection, msg[:70])
+            overwrite = True
+            csv_file = ''
+            rows = {}
+
+            # The optional '<collection>.csv' (';'-separated) maps each
+            # item code (column 2) to its old reference (column 1):
+            # rows[code] = old_ref.
+            if collection + '.csv' in collection_files:
+                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
+                csv_data = csv.reader(open(csv_file), delimiter=';')
+                for row in csv_data:
+                    rows[row[1].strip()] = row[0].strip()
+                msg = collection + ' import du fichier CSV de la collection'
+                self.logger.info(collection, msg[:70])
+            else:
+                msg = collection + ' pas de fichier CSV dans la collection'
+                self.logger.info(collection, msg[:70])
+
+            c = MediaCollection.objects.filter(code=collection_name)
+            if not c:
+                c = MediaCollection(code=collection_name)
+                c.save()
+                msg = ' collection NON présente dans la BDD, CREATION '
+                self.logger.info(c.code, msg)
+            else:
+                c = c[0]
+                msg = ' id = '+str(c.id)
+                self.logger.info(c.code, msg)
+
+            # Unlike the test variant, dotfiles (e.g. '._foo.wav') are
+            # excluded here.
+            audio_files = []
+            for file in collection_files:
+                ext = ['WAV', 'wav']
+                if file.split('.')[-1] in ext and file[0] != '.':
+                    audio_files.append(file)
+
+            audio_files.sort()
+            nb_items = c.items.count()
+            counter = 0
+
+            for file in audio_files:
+                code = file.split('.')[0]
+                wav_file = self.source_dir + os.sep + collection + os.sep + file
+
+                # Cas 1/2: at least as many DB items as files -- match by
+                # code, falling back to old_code from the CSV mapping.
+                if len(audio_files) <= nb_items:
+                    items = MediaItem.objects.filter(code=code)
+
+                    old_ref = ''
+                    if code in rows and not items:
+                        old_ref = rows[code]
+                        items = MediaItem.objects.filter(old_code=old_ref)
+
+                    if items:
+                        item = items[0]
+                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
+                        self.logger.info('item', msg)
+                        item.code = code
+                        item.save()
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+
+                    self.write_file(item, wav_file, overwrite)
+
+                # Cas 3a: one item already in DB but several files --
+                # keep the first file for the existing item, create the rest.
+                elif nb_items == 1 and len(audio_files) > 1:
+                    if counter == 0:
+                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
+                        self.logger.info('item', msg)
+                    else:
+                        item = MediaItem(code=code, collection=c)
+                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
+                        self.logger.info('item', msg)
+                        self.write_file(item, wav_file, overwrite)
+
+                # Cas 3b: ambiguous (several items, more files) -- log only.
+                elif nb_items > 1 and nb_items < len(audio_files):
+                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
+                    self.logger.info('item', msg)
+
+                counter += 1
+
+        msg = 'Liste des URLs des collections importées :'
+        self.logger.info('INFO', msg)
+        for collection in collections:
+            msg = 'http://'+self.domain+'/archives/collections/'+collection
+            self.logger.info(collection, msg)
+
+
+# Print CLI usage for the WAV import tool to stdout.
+def print_usage(tool_name):
+    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
+    print "  project_dir: the directory of the Django project which hosts Telemeta"
+    print "  source_dir: the directory containing the wav files to include"
+    print "  pattern: a pattern to match the collection names"
+    print "  log_file: a log file to write logs"
+    print "  domain: root domain for collections"
+
+def run():
+    if len(sys.argv) < 3:
+        print_usage(os.path.basename(sys.argv[0]))
+        sys.exit(1)
+    else:
+        project_dir = sys.argv[-5]
+        source_dir = sys.argv[-4]
+        pattern = sys.argv[-3]
+        log_file = sys.argv[-2]
+        url = sys.argv[-1]
+        sys.path.append(project_dir)
+        import settings
+        setup_environ(settings)
+        t = TelemetaWavImport(source_dir, log_file, pattern, url)
+        t.wav_import()
+
+# Run only when executed as a script, not when imported as a module.
+if __name__ == '__main__':
+    run()
diff --git a/bin/old/telemeta-media-link.py b/bin/old/telemeta-media-link.py
new file mode 100755 (executable)
index 0000000..118fe95
--- /dev/null
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2010 Guillaume Pellerin
+# All rights reserved.
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
+#
+# Author: Guillaume Pellerin <yomguy@parisson.com>
+#
+
+import os
+import re
+import sys
+import logging
+import datetime
+import timeside
+from django.core.management import setup_environ
+from django.core.files.base import ContentFile
+
+# Decoder metadata tag -> MediaItem field name. Several tags
+# ('encoder', 'audio-codec', 'container-format') all map to the
+# free-text 'comment' field; the import loop below appends those
+# values rather than overwriting.
+mapping = {
+             'title': 'title',
+             'album': 'collection',
+             'date': 'recorded_from_date',
+             'artist': 'author',
+             'track-number': 'track',
+             'encoder': 'comment',
+             'genre': 'generic_style',
+             'audio-codec': 'comment',
+             'container-format': 'comment',
+             }
+
+class Logger:
+    # Thin wrapper around the stdlib logging module: a single
+    # FileHandler at INFO level with a timestamped format. NB: this
+    # copy names its methods write_info/write_error, unlike the
+    # info/error variants in the sibling import scripts.
+
+    def __init__(self, file):
+        # 'file' is the path of the log file to append to.
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+    def write_info(self, prefix, message):
+        # Python 2: 'message' is a utf-8 byte string, decoded before logging.
+        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
+
+    def write_error(self, prefix, message):
+        self.logger.error(prefix + ' : ' + message.decode('utf8'))
+
+
+class TelemetaMediaImport:
+    # Scan a media directory tree and register every file not yet known
+    # to Telemeta as a MediaItem, creating collections from the 'album'
+    # metadata tag read via timeside's decoder.
+
+    def __init__(self, media_dir, log_file):
+        self.logger = Logger(log_file)
+        self.media_dir = media_dir
+        self.medias = self.get_medias()
+    
+    def get_medias(self):
+        # Collect every file under media_dir, skipping any path with a
+        # hidden component (a '.'-prefixed name after a separator).
+        # Side effect: chdir into media_dir, so roots are relative ('.').
+        os.chdir(self.media_dir)
+        file_list = []
+        for root, dirs, files in os.walk('.'):
+            for file in files:
+                path = root + os.sep + file
+                if not os.sep+'.' in path:
+                    file_list.append({'root': root, 'file': file})
+        return file_list
+        
+    def set_collection(self, collection_name):
+        # Fetch-or-create a MediaCollection whose code is the name with
+        # spaces and non-word characters collapsed to '_'.
+        if not collection_name:
+            collection_name = 'Unkown'
+        code = collection_name.replace(' ','_')
+        code = re.sub(r'\W+', '_', code)
+        from telemeta.models.media import MediaCollection
+        collections = MediaCollection.objects.filter(code=code)
+        if not collections:
+            collection = MediaCollection(code=code,title=collection_name)
+            collection.save()
+            msg = 'created'
+            self.logger.write_info('collection ' + collection_name, msg)
+        else:
+            collection = collections[0]
+        return collection
+        
+    def media_import(self):
+        # For each scanned file with no MediaItem of the same code,
+        # decode its metadata, map tags via 'mapping' and save the item.
+        from telemeta.models.media import MediaItem
+        for media in self.medias:
+            path = media['root'] + os.sep + media['file']
+            print 'checking ' + path
+            filename,  ext = os.path.splitext(media['file'])
+            item = MediaItem.objects.filter(code=filename)
+            if not item:
+                print 'importing ' + path
+                decoder = timeside.decoder.FileDecoder(path)
+                try:
+                    metadata = decoder.metadata()
+                    print metadata
+                    collection = self.set_collection(metadata['album'])
+                    item = MediaItem(collection=collection)
+                    item.code = re.sub(r'\W+', '_', metadata['title'])
+                    for tag in mapping.keys():
+                        try:
+                            if tag == 'date':
+                                # assumes the tag looks like '<x> DD/MM/YYYY'
+                                # and rewrites it to YYYY-MM-DD -- TODO
+                                # confirm the decoder's date format
+                                date = metadata[tag].split(' ')[1].split('/')
+                                metadata[tag] = date[2]+'-'+date[1]+'-'+date[0]    
+                            if mapping[tag] == 'comment':
+                                item[mapping[tag]] = item[mapping[tag]] + '\n' + metadata[tag]
+                            else:
+                                item[mapping[tag]] = metadata[tag]
+                        except:
+                            # best-effort: a missing/bad tag is skipped
+                            continue
+                    item.file = path
+                    item.save()
+                    msg = 'added item : ' + path
+                    self.logger.write_info(collection.code, msg)
+                except:
+                    # NOTE(review): bare except hides decode/DB errors for
+                    # this file and moves on -- deliberate best-effort.
+                    continue
+                
+
+def run():
+    project_dir = sys.argv[-2]
+    log_file = sys.argv[-1]
+    sys.path.append(project_dir)
+    import settings
+    setup_environ(settings)
+    media_dir = settings.MEDIA_ROOT
+    t = TelemetaMediaImport(media_dir, log_file)
+    t.media_import()
+
+# Run only when executed as a script, not when imported as a module.
+if __name__ == '__main__':
+    run()
diff --git a/bin/sql/backup_db.sh b/bin/sql/backup_db.sh
new file mode 100755 (executable)
index 0000000..cfb5cff
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+DIR=/srv/backup/
+NOW=$(date +"%Y-%m-%d-%T")
+FILE=telemeta-$NOW.sql.gz
+
+echo "Backuping: "$FILE
+
+mysqldump -hdb -uroot -p$MYSQL_ROOT_PASSWORD telemeta | gzip > $DIR$FILE
+
+rename 's/\:/\_/g' $DIR$FILE
+
+echo "Done!"
diff --git a/bin/sql/convert_myisam_to_innodb.sql b/bin/sql/convert_myisam_to_innodb.sql
new file mode 100644 (file)
index 0000000..91e36d5
--- /dev/null
@@ -0,0 +1,83 @@
+-- Convert every Telemeta/Django table from MyISAM to InnoDB
+-- (transactions, row-level locking, foreign-key support).
+ALTER TABLE `vernacular_styles` ENGINE=InnoDB;
+ALTER TABLE `users` ENGINE=InnoDB;
+ALTER TABLE `thumbnail_kvstore` ENGINE=InnoDB;
+ALTER TABLE `telemeta_media_transcoded` ENGINE=InnoDB;
+ALTER TABLE `tape_width` ENGINE=InnoDB;
+ALTER TABLE `tape_wheel_diameter` ENGINE=InnoDB;
+ALTER TABLE `tape_vendor` ENGINE=InnoDB;
+ALTER TABLE `tape_speed` ENGINE=InnoDB;
+ALTER TABLE `tape_length` ENGINE=InnoDB;
+ALTER TABLE `south_migrationhistory` ENGINE=InnoDB;
+ALTER TABLE `search_criteria` ENGINE=InnoDB;
+ALTER TABLE `searches_criteria` ENGINE=InnoDB;
+ALTER TABLE `searches` ENGINE=InnoDB;
+ALTER TABLE `rights` ENGINE=InnoDB;
+ALTER TABLE `revisions` ENGINE=InnoDB;
+ALTER TABLE `recording_contexts` ENGINE=InnoDB;
+ALTER TABLE `publishing_status` ENGINE=InnoDB;
+ALTER TABLE `publisher_collections` ENGINE=InnoDB;
+ALTER TABLE `publishers` ENGINE=InnoDB;
+ALTER TABLE `profiles` ENGINE=InnoDB;
+ALTER TABLE `playlist_resources` ENGINE=InnoDB;
+ALTER TABLE `playlists` ENGINE=InnoDB;
+ALTER TABLE `physical_formats` ENGINE=InnoDB;
+ALTER TABLE `original_format` ENGINE=InnoDB;
+ALTER TABLE `original_channel_number` ENGINE=InnoDB;
+ALTER TABLE `organization` ENGINE=InnoDB;
+ALTER TABLE `metadata_writers` ENGINE=InnoDB;
+ALTER TABLE `metadata_authors` ENGINE=InnoDB;
+ALTER TABLE `media_type` ENGINE=InnoDB;
+ALTER TABLE `media_transcoding` ENGINE=InnoDB;
+ALTER TABLE `media_status` ENGINE=InnoDB;
+ALTER TABLE `media_parts` ENGINE=InnoDB;
+ALTER TABLE `media_markers` ENGINE=InnoDB;
+ALTER TABLE `media_item_related` ENGINE=InnoDB;
+ALTER TABLE `media_item_performances` ENGINE=InnoDB;
+ALTER TABLE `media_item_keywords` ENGINE=InnoDB;
+ALTER TABLE `media_item_identifier` ENGINE=InnoDB;
+ALTER TABLE `media_items` ENGINE=InnoDB;
+ALTER TABLE `media_formats` ENGINE=InnoDB;
+ALTER TABLE `media_fonds_related` ENGINE=InnoDB;
+ALTER TABLE `media_fonds_children` ENGINE=InnoDB;
+ALTER TABLE `media_fonds` ENGINE=InnoDB;
+ALTER TABLE `media_corpus_related` ENGINE=InnoDB;
+ALTER TABLE `media_corpus_children` ENGINE=InnoDB;
+ALTER TABLE `media_corpus` ENGINE=InnoDB;
+ALTER TABLE `media_collection_related` ENGINE=InnoDB;
+ALTER TABLE `media_collection_identifier` ENGINE=InnoDB;
+ALTER TABLE `media_collections` ENGINE=InnoDB;
+ALTER TABLE `media_analysis` ENGINE=InnoDB;
+ALTER TABLE `location_types` ENGINE=InnoDB;
+ALTER TABLE `location_relations` ENGINE=InnoDB;
+ALTER TABLE `location_aliases` ENGINE=InnoDB;
+ALTER TABLE `locations` ENGINE=InnoDB;
+ALTER TABLE `legal_rights` ENGINE=InnoDB;
+ALTER TABLE `languages` ENGINE=InnoDB;
+ALTER TABLE `jqchat_room` ENGINE=InnoDB;
+ALTER TABLE `jqchat_message` ENGINE=InnoDB;
+ALTER TABLE `ipauth_range` ENGINE=InnoDB;
+ALTER TABLE `instrument_relations` ENGINE=InnoDB;
+ALTER TABLE `instrument_alias_relations` ENGINE=InnoDB;
+ALTER TABLE `instrument_aliases` ENGINE=InnoDB;
+ALTER TABLE `instruments` ENGINE=InnoDB;
+ALTER TABLE `identifier_type` ENGINE=InnoDB;
+ALTER TABLE `googletools_siteverificationcode` ENGINE=InnoDB;
+ALTER TABLE `googletools_analyticscode` ENGINE=InnoDB;
+ALTER TABLE `generic_styles` ENGINE=InnoDB;
+ALTER TABLE `ethnic_group_aliases` ENGINE=InnoDB;
+ALTER TABLE `ethnic_groups` ENGINE=InnoDB;
+ALTER TABLE `django_site` ENGINE=InnoDB;
+ALTER TABLE `django_session` ENGINE=InnoDB;
+ALTER TABLE `django_content_type` ENGINE=InnoDB;
+ALTER TABLE `django_admin_log` ENGINE=InnoDB;
+ALTER TABLE `copy_type` ENGINE=InnoDB;
+ALTER TABLE `context_keywords` ENGINE=InnoDB;
+ALTER TABLE `auth_user_user_permissions` ENGINE=InnoDB;
+ALTER TABLE `auth_user_groups` ENGINE=InnoDB;
+ALTER TABLE `auth_user` ENGINE=InnoDB;
+ALTER TABLE `auth_permission` ENGINE=InnoDB;
+ALTER TABLE `auth_message` ENGINE=InnoDB;
+ALTER TABLE `auth_group_permissions` ENGINE=InnoDB;
+ALTER TABLE `auth_group` ENGINE=InnoDB;
+ALTER TABLE `ad_conversions` ENGINE=InnoDB;
+ALTER TABLE `acquisition_modes` ENGINE=InnoDB;
diff --git a/bin/sql/drop_timeside.sql b/bin/sql/drop_timeside.sql
new file mode 100644 (file)
index 0000000..adecbcd
--- /dev/null
@@ -0,0 +1,15 @@
+-- Remove all TimeSide analysis tables from the database.
+-- NB: MySQL DDL statements auto-commit, so the BEGIN/COMMIT pair is
+-- advisory only and will not make the drops atomic.
+BEGIN;
+
+DROP TABLE IF EXISTS `timeside_results`;
+DROP TABLE IF EXISTS `timeside_selections_items`;
+DROP TABLE IF EXISTS `timeside_items`;
+DROP TABLE IF EXISTS `timeside_tasks`;
+DROP TABLE IF EXISTS `timeside_selections_selections`;
+DROP TABLE IF EXISTS `timeside_selections`;   
+DROP TABLE IF EXISTS `timeside_experiences_presets`;
+DROP TABLE IF EXISTS `timeside_presets`;
+DROP TABLE IF EXISTS `timeside_experiences_experiences`;
+DROP TABLE IF EXISTS `timeside_experiences`;
+DROP TABLE IF EXISTS `timeside_processors`;
+
+COMMIT;
diff --git a/bin/sql/fix_contentttypes.sql b/bin/sql/fix_contentttypes.sql
new file mode 100644 (file)
index 0000000..0d2c76b
--- /dev/null
@@ -0,0 +1 @@
+-- Drop the stale 'name' column from django_content_type -- presumably
+-- left behind after a Django upgrade removed the field from the model;
+-- confirm the target Django version before running in production.
+alter table django_content_type drop column name;
diff --git a/bin/sql/import_sql.sh b/bin/sql/import_sql.sh
new file mode 100755 (executable)
index 0000000..10c0ca4
--- /dev/null
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+if [[ $# -ne 4 ]]; then
+       echo "Usage: $0 <username> <password> <database> </path/to/sql_file.sql.gz>"
+       exit 1
+fi
+
+echo "=> Starting MySQL Server"
+/usr/bin/mysqld_safe > /dev/null 2>&1 &
+PID=$!
+
+RET=1
+while [[ RET -ne 0 ]]; do
+    echo "=> Waiting for confirmation of MySQL service startup"
+    sleep 5
+    mysql -u"$1" -p"$2" -e "status" > /dev/null 2>&1
+RET=$?
+done
+
+echo "   Started with PID ${PID}"
+
+echo "=> Importing SQL file"
+gunzip -c "$4" | mysql -u"$1" -p"$2" "$3"
+
+echo "=> Stopping MySQL Server"
+mysqladmin -u"$1" -p"$2" shutdown
+
+echo "=> Done!"
diff --git a/bin/sql/restore_db.sh b/bin/sql/restore_db.sh
new file mode 100755 (executable)
index 0000000..8a8fd6b
--- /dev/null
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+DIR=/srv/backup/
+FILE=`ls -t $DIR/*.sql* | head -1`
+
+echo "Restoring: "$FILE
+
+if [[ $FILE == *".gz" ]]; then
+    gunzip < $FILE | mysql -hdb -uroot -p$MYSQL_ROOT_PASSWORD telemeta
+else
+    mysql -hdb -uroot -p$MYSQL_ROOT_PASSWORD telemeta < $FILE
+fi
+
+echo "Done!"
diff --git a/bin/transcode/create_thumbs.py b/bin/transcode/create_thumbs.py
new file mode 100755 (executable)
index 0000000..dc3fd20
--- /dev/null
@@ -0,0 +1,42 @@
+#!/usr/bin/python
+
+import os, sys, string
+import logging
+
+class Logger:
+    """A logging object"""
+
+    # Single FileHandler at INFO level; messages are written bare
+    # (format is '%(message)s' only) so the log doubles as a plain list
+    # of processed paths, re-read by the script below on restart.
+    def __init__(self, file):
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+log_file = 'thumbs.log'
+logger = Logger(log_file)
+root_dir = sys.argv[-1]
+args = sys.argv[1:-1]
+source_format = 'webm'
+done = []
+preview_tc = '00:00:05'
+
+if os.path.exists(log_file):
+    f = open(log_file, 'r')
+    for line in f.readlines():
+        done.append(line[:-1])
+    f.close()
+
+for root, dirs, files in os.walk(root_dir):
+    for file in files:
+        path = os.path.abspath(root + os.sep + file)
+        name, ext = os.path.splitext(file)
+        if ext[1:] == source_format:
+            dest = os.path.abspath(root + os.sep + name + '.png')
+            if not dest in done or '--force' in args:
+                command = 'ffmpeg -ss '+ preview_tc + ' -i ' + path + '  -y ' + dest
+                os.system(command)
+                logger.logger.info(dest)
+
+print "DONE!"
diff --git a/bin/transcode/remux_fix_media.py b/bin/transcode/remux_fix_media.py
new file mode 100755 (executable)
index 0000000..39cfd9f
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/python
+
+import os, sys, psutil
+import datetime
+from ebml.utils.ebml_data import *
+
+class FixCheckMedia(object):
+
+    def __init__(self, dir, tmp_dir):
+        self.dir = dir
+        self.tmp_dir = tmp_dir
+        if not os.path.exists(self.tmp_dir):
+            os.makedirs(self.tmp_dir)
+
+    def process(self):
+        webm_fixed_log = 'webm.fixed'
+        webm_tofix_log = 'webm.tofix'
+        mp3_fixed_log = 'mp3.fixed'
+        mp3_tofix_log = 'mp3.tofix'
+
+        for root, dirs, files in os.walk(self.dir):
+            for filename in files:
+                source = root + os.sep + filename
+                name = os.path.splitext(filename)[0]
+                ext = os.path.splitext(filename)[1][1:]
+
+                if ext == 'webm' and os.path.getsize(source):
+                    dir_files = os.listdir(root)
+
+                    if not webm_fixed_log in dir_files:
+                        print source
+                        self.fix_webm(source)
+                        f = open(root + os.sep + webm_fixed_log, 'w')
+                        f.close()
+                        if os.path.exists(root + os.sep + webm_tofix_log):
+                            os.remove(root + os.sep + webm_tofix_log)
+
+                    if mp3_tofix_log in dir_files or not mp3_fixed_log in dir_files:
+                        for file in dir_files:
+                            dest_ext = os.path.splitext(file)[1][1:]
+                            if dest_ext == 'mp3':
+                                dest = root + os.sep + file
+                                print dest
+                                self.fix_mp3(source, dest)
+                                f = open(root + os.sep + mp3_fixed_log, 'w')
+                                f.close()
+                                if os.path.exists(root + os.sep + mp3_tofix_log):
+                                    os.remove(root + os.sep + mp3_tofix_log)
+                                #break
+
+
+    def hard_fix_webm(self, path):
+        try:
+            tmp_file = self.tmp_dir + 'out.webm '
+            command = 'ffmpeg -loglevel 0 -i "'+ path + '" -vcodec libvpx -vb 500k -acodec libvorbis -aq 7 -f webm -y "' + tmp_file + '" > /dev/null'
+            print command
+            os.system(command)
+            command = 'mv '  + tmp_file + path
+            os.system(command)
+        except:
+            pass
+
+
+    def fix_webm(self, path):
+        try:
+            tmp_file = self.tmp_dir + 'out.webm'
+            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "' + path + '" -vcodec copy -acodec copy -f webm -y "' + tmp_file + '" > /dev/null'
+            print command
+            os.system(command)
+            ebml_obj = EBMLData(tmp_file)
+            offset = ebml_obj.get_first_cluster_seconds()
+            command = '/usr/local/bin/ffmpeg -loglevel 0 -ss ' + str(offset) + ' -i "' + tmp_file + '" -vcodec copy -acodec copy -f webm -y "' + path + '" > /dev/null'
+            print command
+            os.system(command)
+        except:
+            pass
+
+    def fix_mp3(self, source, path):
+        try:
+            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "'+ source + '" -vn -aq 6 -y "' + path + '" > /dev/null'
+            print command
+            os.system(command)
+        except:
+            pass
+
+def get_pids(name, args=None):
+    """Get a process pid filtered by arguments and uid"""
+    pids = []
+    for proc in psutil.process_iter():
+        if proc.cmdline:
+            if name == proc.name:
+                if args:
+                    if args in proc.cmdline:
+                        pids.append(proc.pid)
+                else:
+                    pids.append(proc.pid)
+    return pids
+
+dir = sys.argv[-2]
+tmp_dir = sys.argv[-1]
+
+path =  os.path.abspath(__file__)
+pids = get_pids('python2.6',args=path)
+
+print datetime.datetime.now()
+if len(pids) <= 1:
+    print 'starting process...'
+    f = FixCheckMedia(dir, tmp_dir)
+    f.process()
+    print 'process finished.\n'
+else:
+    print 'already started !\n'
+
diff --git a/bin/transcode/transcode.py b/bin/transcode/transcode.py
new file mode 100755 (executable)
index 0000000..efaa113
--- /dev/null
@@ -0,0 +1,76 @@
+#!/usr/bin/python
+
+import os, sys, string
+import logging
+
+
+class Logger:
+    """A logging object"""
+
+    def __init__(self, file):
+        self.logger = logging.getLogger('myapp')
+        self.hdlr = logging.FileHandler(file)
+        self.formatter = logging.Formatter('%(asctime)s %(message)s')
+        self.hdlr.setFormatter(self.formatter)
+        self.logger.addHandler(self.hdlr)
+        self.logger.setLevel(logging.INFO)
+
+
+class TelemetaTranscode(object):
+    """docstring for TelemetaTranscode"""
+
+    threads = 4
+    source_formats = ['webm', 'mp4']
+    dest_formats = {
+                   'mp3' : '-vn -acodec libmp3lame -aq 6',
+                   'ogg' : '-vn -acodec libvorbis -aq 6',
+                   'mp4' : '-vcodec libx264 -threads ' + str(threads) + \
+                           ' -c:v libx264 -crf 17 -maxrate 1100k -bufsize 1835k -acodec libfaac -ab 96k',
+                   'png' : '',
+                   'webm' : '-vcodec libvpx -threads ' + str(threads) + \
+                           ' -c:v libvpx -crf 17 -b:v 1100k',
+                  }
+
+
+    def __init__(self, args):
+        self.args = args
+        self.log_file = args[-1]
+        self.root_dir = args[-2]
+        self.logger = Logger(self.log_file)
+
+
+    def get_ext_in_dir(self, extension, root):
+        files = os.listdir(root)
+        exts = []
+        for f in files:
+            name, ext = os.path.splitext(f)
+            ext = ext[1:]
+            if not ext in exts:
+                exts.append(ext)
+        return extension in exts
+
+    def run(self):
+        for root, dirs, files in os.walk(self.root_dir):
+            for file in files:
+                path = os.path.abspath(root + os.sep + file)
+                name, ext = os.path.splitext(file)
+                ext = ext[1:]
+                if ext in self.source_formats:
+                    for format, ffmpeg_args in self.dest_formats.iteritems():
+                        local_file = name + '.' + format
+                        dest = os.path.abspath(root + os.sep + local_file)
+                        local_files = os.listdir(root)
+                        if not (local_file in local_files or self.get_ext_in_dir(format, root)) or '--force' in self.args:
+                            if ext == 'webm' and format == 'ogg':
+                                ffmpeg_args = '-vn -acodec copy'
+                            command = 'ffmpeg -loglevel 0 -i "' + path + '" ' + ffmpeg_args + ' -y "' + dest + '"'
+                            self.logger.logger.info(command)
+                            if not '--dry-run' in self.args:
+                                os.system(command)
+                            else:
+                                print command
+
+
+if __name__ == '__main__':
+    t = TelemetaTranscode(sys.argv[1:])
+    t.run()
diff --git a/bin/upgrade.sh b/bin/upgrade.sh
new file mode 100755 (executable)
index 0000000..a55e69d
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker-compose run app /srv/bin/upgrade_from_1.6_to_1.7.sh
index 9cd72c9946559a0275f3604261f2315faca9ffb0..68585ff568bafe8e31e104359b114f1f7ff1868f 100644 (file)
@@ -36,7 +36,7 @@ data:
 db:
   image: mysql:5
   volumes:
-    - ./scripts/:/srv/scripts
+    - ./bin/:/srv/bin
     - ./data/mysql/:/var/lib/mysql
   volumes_from:
     - data
@@ -59,7 +59,7 @@ app:
     - data
   env_file:
     - env/prod.env
-  command: /bin/bash scripts/app.sh
+  command: /bin/bash bin/app.sh
   links:
     - broker
     - db
@@ -72,7 +72,7 @@ worker:
     - data
   env_file:
     - env/prod.env
-  command: /bin/bash scripts/worker.sh
+  command: /bin/bash bin/worker.sh
   links:
     - broker
     - db
index f5e68cc388bdf59cb761276daee1dcda5f1617bc..dfcaadabe91a47466ccd5be4b46af186bda01e00 100644 (file)
@@ -26,7 +26,7 @@ app:
   image: parisson/telemeta:latest
   env_file:
     - env/debug.env
-  command: /bin/bash scripts/app.sh --runserver
+  command: /bin/bash bin/app.sh --runserver
   ports:
     - 9100:8000
 
@@ -37,4 +37,4 @@ worker:
 
 db:
   volumes:
-    - ./scripts/:/srv/scripts
+    - ./bin/:/srv/bin
index 1c234ae04c1578eca44f803edd07d1ae68218673..5b85c759d0ebf97ff1af2da164a7b513a60f0fff 100644 (file)
@@ -24,4 +24,4 @@
 app:
   ports:
     - "8888:8888"
-  command: "/srv/app/scripts/notebook.sh"
+  command: "/srv/app/bin/notebook.sh"
diff --git a/scripts/kdenlive/__init__.py b/scripts/kdenlive/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/scripts/kdenlive/auto_fade.py b/scripts/kdenlive/auto_fade.py
deleted file mode 100755 (executable)
index c716a8c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-import sys
-from telemeta.util.kdenlive.fade import AutoFade
-
-path = sys.argv[-1]
-fade = AutoFade(path)
-data = fade.run()
-f = open(path, 'w')
-f.write(data)
-f.close()
diff --git a/scripts/kdenlive/auto_fade_batch.py b/scripts/kdenlive/auto_fade_batch.py
deleted file mode 100755 (executable)
index 2704776..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-
-import os, sys
-from telemeta.util.kdenlive.fade import AutoFade
-
-if __name__ == '__main__':
-    dir = sys.argv[-2]
-    ext = sys.argv[-1]
-
-    for filename in os.listdir(dir):
-        prefix, extension = os.path.splitext(filename)
-        path = dir + os.sep + filename
-        flag = path + '.faded'
-        if ext in extension and not os.path.exists(flag):
-            os.system('cp ' + path + ' ' + path + '.bak')
-            fade = AutoFade(path)
-            data = fade.run()
-            f = open(path, 'w')
-            f.write(data)
-            f.close()
-            os.system('touch ' + flag)
diff --git a/scripts/kdenlive/fade.py b/scripts/kdenlive/fade.py
deleted file mode 100644 (file)
index c590194..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2012-2013 Guillaume Pellerin <yomguy@parisson.com>
-
-# This file is part of TimeSide.
-
-# TimeSide is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-
-# TimeSide is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
-
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-
-
-from telemeta.util.xmltodict2 import *
-
-
-class AutoFade(object):
-    """ Automatically applies a fade in and a fade out trasitions between each segment of a KdenLive session.
-        Each video clip needs to be splitted into one video track and an audio one ("Split audio"),
-        so that an audio fade in/out is also applied.
-
-        MLT files are also supported.
-    """
-
-    def __init__(self, path, audio_frames_out=2, audio_frames_in=1,
-                       video_frames_out=3, video_frames_in=3):
-        self.audio_frames_in = audio_frames_in
-        self.audio_frames_out = audio_frames_out
-        self.video_frames_in = video_frames_in
-        self.video_frames_out = video_frames_out
-        self.path = path
-        self.session = xmltodict(self.path)
-
-    def audio_fade_out(self, frame_out):
-        child = {'attributes': {u'id': u'fadeout',
-        u'in': unicode(int(frame_out)-self.audio_frames_out),
-        u'out': unicode(frame_out)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'window'},
-         'cdata': '75',
-         'name': 'property'},
-        {'attributes': {u'name': u'max_gain'},
-         'cdata': '20dB',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fadeout',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'gain'}, 'cdata': '1', 'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-    def audio_fade_in(self, frame_in):
-        child = {'attributes': {u'id': u'fadein',
-        u'in': unicode(frame_in),
-        u'out': unicode(int(frame_in)+self.audio_frames_in)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'window'},
-         'cdata': '75',
-         'name': 'property'},
-        {'attributes': {u'name': u'max_gain'},
-         'cdata': '20dB',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fadein',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'volume',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'gain'}, 'cdata': '0', 'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-
-    def video_fade_out(self, frame_out):
-        child = {'attributes': {u'id': u'fade_to_black',
-        u'in': unicode(int(frame_out)-self.video_frames_out),
-        u'out': unicode(frame_out)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'start'}, 'cdata': '1', 'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fade_to_black',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '0', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-
-    def video_fade_in(self, frame_in):
-        child = {'attributes': {u'id': u'fade_from_black',
-        u'in': unicode(frame_in),
-        u'out': unicode(int(frame_in)+self.video_frames_in)},
-       'children': [{'attributes': {u'name': u'track'},
-         'cdata': '0',
-         'name': 'property'},
-        {'attributes': {u'name': u'start'}, 'cdata': '0', 'name': 'property'},
-        {'attributes': {u'name': u'mlt_type'},
-         'cdata': 'filter',
-         'name': 'property'},
-        {'attributes': {u'name': u'mlt_service'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_id'},
-         'cdata': 'fade_from_black',
-         'name': 'property'},
-        {'attributes': {u'name': u'tag'},
-         'cdata': 'brightness',
-         'name': 'property'},
-        {'attributes': {u'name': u'kdenlive_ix'},
-         'cdata': '1',
-         'name': 'property'},
-        {'attributes': {u'name': u'end'}, 'cdata': '1', 'name': 'property'}],
-       'name': 'filter'}
-
-        return child
-
-    def run(self):
-        audio_count = 0
-        video_count = 0
-        
-        for attr in self.session['children']:
-            if 'playlist' in attr['name'] and 'children' in attr:
-                for att in attr['children']:
-                    if 'producer' in att['attributes'] and not 'children' in att:                        
-                        producer = att['attributes']['producer']
-                        if producer != 'black':
-                        
-                            frame_in = att['attributes']['in']
-                            frame_out = att['attributes']['out']
-
-                            if 'audio' in producer:
-                                if not audio_count % 2:
-                                    att['children'] = [self.audio_fade_out(frame_out)]
-                                else:
-                                    att['children'] = [self.audio_fade_in(frame_in)]
-                                audio_count += 1
-
-
-                            if 'video' in producer:
-                                if not video_count % 2:
-                                    att['children'] = [self.video_fade_out(frame_out)]
-                                else:
-                                    att['children'] = [self.video_fade_in(frame_in)]
-                                video_count += 1
-
-        return dicttoxml(self.session).encode('utf-8')
-
-
diff --git a/scripts/kdenlive/mlt_fix_threads.sh b/scripts/kdenlive/mlt_fix_threads.sh
deleted file mode 100755 (executable)
index 60b0061..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-#threads=$1
-dir=$1
-
-for file in `ls $dir/*.sh`; do
- perl -pi -e 's/threads=6/threads=4/g' $file
- perl -pi -e 's/threads=2/threads=4/g' $file
- perl -pi -e 's/threads=1/threads=4/g' $file
-done
diff --git a/scripts/kdenlive/mlt_process_batch.py b/scripts/kdenlive/mlt_process_batch.py
deleted file mode 100755 (executable)
index 7d346c6..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/python
-
-import os, sys
-
-if __name__ == '__main__':
-    root_dir = sys.argv[-1]
-
-    fading = False
-    if '--fading' in sys.argv:
-        fading = True
-
-    for root, dirs, files in os.walk(root_dir):
-        for filename in files:
-            prefix, extension = os.path.splitext(filename)
-            path = root + os.sep + filename
-
-            flag = path + '.processed'
-            if 'sh' in extension and not os.path.exists(flag):
-                if fading:
-                    from telemeta.util.kdenlive.fade import AutoFade
-                    local_files = os.listdir(root)
-                    for local_file in local_files:
-                        local_name, local_ext = os.path.splitext(local_file)
-                        if 'mlt' in local_ext:
-                            local_path = root + os.sep + local_file
-                            local_flag = local_path + '.faded'
-                            if not os.path.exists(local_flag):
-                                print 'fading :        ' + local_path 
-                                os.system('cp ' + local_path + ' ' + local_path + '.bak')
-                                fade = AutoFade(local_path)
-                                data = fade.run()
-                                f = open(local_path, 'w')
-                                f.write(data)
-                                f.close()
-                                os.system('touch ' + local_flag)
-
-                print 'processing :    ' + path
-                os.system('nice -n 19 ' + path)
-                os.system('touch ' + flag)
diff --git a/scripts/old/crem_checker.py b/scripts/old/crem_checker.py
deleted file mode 100755 (executable)
index 9b5088a..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-Vérifier que les nouvelles cotes d'item :
-
-- correspondent toutes à la collection décrite par le fichier .csv
-  (le fichier .csv est nommé d'après la nouvelle cote de collection)
-
-- sont uniques dans le fichiers .csv
-
-- ont un des formats suivant :
-    - soit CNRSMH_I_aaaa_nnn_mmm
-    - soit CNRSMH_I_aaaa_nnn_mmm_tt
-    - soit CNRSMH_I_aaaa_nnn_mmm_tt_pp
-    - soit CNRSMH_E_aaaa_nnn_mmm_tt
-    - soit CNRSMH_E_aaaa_nnn_mmm_tt_pp
-
-- correspondent à un fichier .wav (et qu'il n'y a pas de fichiers .wav
-  supplémentaire)
-
-Vérifier que le répertoire est nommé d'après la nouvelle cote de collection
-
-Vérifier que la nouvelle cote de collection a l'un des formats suivant :
-    - soit CNRSMH_I_aaaa_nnn
-    - soit CNRSMH_E_aaaa_nnn_mmm
-
-Vérifier que les fichiers .wav sont lisibles, ont une durée et sont identifiés
-comme WAV par audiolab.
-"""
-
-
-import os
-import re
-import sys
-import csv
-import xlrd
-import datetime
-import logging
-import shutil
-
-COLLECTION_OLD_PATTERN = [
-        { 'format': 'BM.aaa.nnn.mmm',           'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'BM.aaaa.nnn.mmm/pp',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaaa.nnn.mmm',          'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'BM.aaaa.nnn.mmm/',         'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/$'},
-        { 'format': 'BM.aaaa.nnn.mmm/ppp',      'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
-        { 'format': 'BM.aaaa.nnn.mm/pp',        'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{2})/[0-9]{2}$'},
-        { 'format': 'BM.aaaa.nnn',              'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})$'},
-        { 'format': 'BM.aaa.nnn.mmm/pp',        'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaa.nnn FANTOME',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3}) FANTOME$'},
-        { 'format': 'BM.aaa.nnn',               'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'BM.aaa.nnnBISoo/pp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})BIS([0-9]{2})/[0-9]{2}$'},
-        { 'format': 'BM.aaa.nnn.mmm.ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})\.[0-9]{3}$'},
-        { 'format': 'BM.aaa.nnn.mmm/ppp',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{3}$'},
-        { 'format': 'BM.aaa.nnn/pp',            'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaa.nnn-BIS.ooo/pp',    'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})-BIS\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'BM.aaaa.nnn.mmm/NN',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/NN$'},
-        { 'format': 'BM.aaa.nnn.mmm/pp-DEPOT',  'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}-DEPOT$'},
-        { 'format': 'BM.aaa.nnn.mmm-o>p',       'regex': r'^(BM)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]>[0-9]$'},
-        { 'format': 'CY.aaaa.nnn',              'regex': r'^(CY)\.([0-9]{4})\.([0-9]{3})$'},
-        { 'format': 'DI.aaaa.nnn.mmm',          'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'DI.aaaa.nnn.mmm/pp',       'regex': r'^(DI)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'DI.aaa.nnn.mmm',           'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})$'},
-        { 'format': 'DI.aaa.nnn.mmm/pp',        'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})/[0-9]{2}$'},
-        { 'format': 'DI.aaa.nnn.mmm-o/p',       'regex': r'^(DI)\.([0-9]{3})\.([0-9]{3})\.([0-9]{3})-[0-9]/[0-9]$'},
-        { 'format': 'FANTOME 2*',               'regex': r'FANTOME 2\*$'},
-
-        ## yomguy
-        { 'format': 'BM.aaaa.nnn.mm',       'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})$'},
-        #{ 'format': 'BM.aaaa.nnn.mmm/pp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
-        #{ 'format': 'BM.aaaa.nnn.mmm/ppp:ii-jj', 'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3})/([0-9]{2})\:([0-9]{2})\-([0-9]{2})$'},
-        #{ 'format': 'BM.aaaa.nnn.mmm:ii-jj',    'regex': r'^(BM)\.([0-9]{4})\.([0-9]{3})\.([0-9]{3}):([0-9]{2})\-([0-9]{2})$'},
-        ]
-
-ITEM_NEW_PATTERN = [
-        { 'format': 'CNRSMH_I_aaaa_nnn_mmm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
-        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
-        { 'format': 'CNRSMH_I_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})_([0-9]{2})$'},
-        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2})$'},
-        { 'format': 'CNRSMH_E_aaaa_nnn_mmm_tt_pp',     'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})_([0-9]{2,3})_([0-9]{2})$'},
-
-        # yomguy
-        { 'format': 'CNRSMH_I_aaaa_nnn_mm',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})_([0-9]{2})$'},
-        ]
-
-COLLECTION_PATTERN = [
-        { 'format': 'CNRSMH_I_aaaa_nnn',           'regex': r'^(CNRSMH)_I_([0-9]{4})_([0-9]{3})$'},
-        { 'format': 'CNRSMH_E_aaaa_nnn_mmm',        'regex': r'^(CNRSMH)_E_([0-9]{4})_([0-9]{3})_([0-9]{3})$'},
-        ]
-
-
-def check_name(patterns, name):
-    match = False
-    for pattern in patterns:
-        match = re.match(pattern['regex'], name)
-        if match:
-            break
-    return match
-
-
-class Logger:
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def write_info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def write_error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class CremCollection:
-
-    def __init__(self, dir, logger):
-        self.dir = dir
-        self.dir_name = self.dir.split(os.sep)[-1]
-        self.file_list = os.listdir(self.dir)
-        self.logger = logger
-
-    def xls_list(self):
-        file_list = []
-        for file in self.file_list:
-            filename = os.path.basename(file)
-            ext = os.path.splitext(file)[1]
-            if not '.' == filename[0] and (ext == '.xls' or ext == '.XLS'):
-                file_list.append(file)
-        print file_list
-        return file_list
-
-    def wav_list(self):
-        list = []
-        for file in self.file_list:
-            filename = os.path.basename(file)
-            ext = os.path.splitext(file)[1]
-            if not '.' == filename[0] and (ext == '.wav' or ext == '.WAV'):
-                list.append(file)
-            elif '.' == filename[0]:
-                self.logger.write_error(file, 'Warning : fichier caché présent !')
-        return list
-
-
-class CremCSV:
-
-    def __init__(self, file):
-        self.csv_file = open(file, 'w')
-        self.csv = csv.writer(self.csv_file,  delimiter=';')
-
-    def close(self):
-        self.csv_file.close()
-
-class CremXLS:
-
-    def __init__(self, file):
-        self.first_row = 8
-        self.original_col = 0
-        self.new_col = 1
-        self.book = xlrd.open_workbook(file)
-        self.sheet = self.book.sheet_by_index(0)
-        self.original_refs = self.original_refs()
-        self.new_refs = self.new_refs()
-        #print len(self.new_refs)
-        while True:
-            if len(self.original_refs) == 0 or len(self.new_refs) == 0:
-                break
-            else:
-                if not 'CNRS' in self.new_refs[0].encode('utf8') \
-                 and not  self.original_refs[0].encode('utf8') == '':
-                    self.original_refs = self.original_refs[1:]
-                    self.new_refs = self.new_refs[1:]
-                else:
-                    break
-
-        self.size = max(len(self.new_refs), len(self.original_refs))
-
-    def original_refs(self):
-        col = self.sheet.col(self.original_col)
-        list = []
-        for cell in col[self.first_row:]:
-            if cell.ctype == 1:
-                list.append(cell.value)
-        return list
-
-    def new_refs(self):
-        col = self.sheet.col(self.new_col)
-        list = []
-        for cell in col[self.first_row:]:
-            if cell.ctype == 1:
-                list.append(cell.value)
-        return list
-
-
-class CremItemFile:
-
-    def __init__(self):
-        self.media = ''
-
-    def set_media(self, media):
-        self.media = media
-
-    def properties(self):
-        self.frames = self.audio_file.get_nframes()
-        self.samplerate = self.audio_file.get_samplerate()
-        self.channels = self.audio_file.get_channels()
-        self.format = self.audio_file.get_file_format()
-        self.encoding = self.audio_file.get_encoding()
-
-
-class CremCheck:
-
-    def __init__(self, root_dir, log_file):
-        self.root_dir = root_dir
-        self.logger = Logger(log_file)
-        dir_list = os.listdir(self.root_dir)
-        list = []
-        for dir in dir_list:
-           if not dir[0] == '.':
-               list.append(dir)
-        self.dir_list = list
-
-    def check_new_refs(self):
-        for name in self.new_refs:
-            return check_name(ITEM_PATTERN, name)
-
-    def check(self):
-        for dir in self.dir_list:
-            collection = CremCollection(self.root_dir + dir, self.logger)
-            msg = '************************ ' + collection.dir_name + ' ******************************'
-            self.logger.write_info(collection.dir, msg[:70])
-
-            xls_list = collection.xls_list()
-            wav_list = collection.wav_list()
-
-            if not check_name(COLLECTION_PATTERN, dir):
-                self.logger.write_error(collection.dir, 'Le dossier de la collection est mal nommé -> SORTIE')
-            elif len(xls_list) == 0:
-                self.logger.write_error(collection.dir, 'PAS de fichier XLS dans le dossier collection -> SORTIE')
-            elif len(xls_list) > 1:
-                self.logger.write_error(collection.dir, 'Plusieurs fichiers XLS dans le dossier collection -> SORTIE')
-
-            else:
-                xls = CremXLS(self.root_dir + os.sep + dir + os.sep + xls_list[0])
-                self.logger.write_info(collection.dir, 'XLS : ' + xls_list[0] + ' - Feuille : ' + xls.sheet.name.encode('utf8'))
-                self.logger.write_info(collection.dir, 'Nombre d\'items détectés : ' + str(xls.size))
-                csv_file = CremCSV(self.root_dir + dir + os.sep + collection.dir_name + '.csv')
-
-                if len(wav_list) != xls.size:
-                    self.logger.write_error(collection.dir, \
-                    'Le nombre de références du fichier XLS (' + str(xls.size) + ') diffère du nombre de fichiers (' + str(len(wav_list)) + ')')
-
-                temp_list = []
-                item_file = CremItemFile()
-
-                for i in range(0,xls.size):
-                    error = False
-
-                    try:
-                        item_old = xls.original_refs[i]
-                        #self.logger.write_error(collection.dir, item_old)
-                    except:
-                        item_old = ''
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item est inexistante'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-                        continue
-
-                    try:
-                        item = xls.new_refs[i]
-                        #self.logger.write_error(collection.dir, item)
-                    except:
-                        item = ''
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item est inexistante'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-                        continue
-
-                    if not item in temp_list:
-                        temp_list.append(item)
-                    else:
-                        msg =  'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' est multiple'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-
-                    #if not check_name(ITEM_OLD_PATTERN, item_old):
-                        #msg = 'Ligne ' + str(i+xls.first_row+1) + ' : l\'ancienne référence d\'item ' + item_old.encode('utf8') + ' est mal formatée'
-                        #self.logger.write_error(collection.dir, msg)
-
-                    if not check_name(ITEM_NEW_PATTERN, item):
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la nouvelle référence d\'item ' + item.encode('utf8') + ' est mal formatée'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-
-                    if not collection.dir_name in item:
-                        msg = 'Ligne ' + str(i+xls.first_row+1) + ' : la référence d\'item ' + item.encode('utf8') + ' ne correspond pas à celle de la collection'
-                        self.logger.write_error(collection.dir, msg)
-                        error = True
-
-                    name_wav = item.encode('utf8') + '.wav'
-                    if not name_wav in wav_list:
-                        self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'existe pas')
-                    else:
-                        item_file.set_media(collection.dir + os.sep + name_wav)
-                        #if not item_file.is_wav():
-                        #    self.logger.write_error(collection.dir, 'Le fichier ' + item.encode('utf8') + '.wav n\'est pas valide')
-                        #    error = True
-
-                    if not error:
-                        csv_file.csv.writerow([xls.original_refs[i], xls.new_refs[i]])
-
-                csv_file.close()
-
-                for filename in wav_list:
-                    if not check_name(ITEM_NEW_PATTERN, os.path.splitext(filename)[0]):
-                        self.logger.write_error(collection.dir, 'Le nom du fichier ' + str(os.path.splitext(filename)[0]) + ' est mal formaté')
-
-            msg = '********************************************************************************'
-            self.logger.write_info(collection.dir, msg[:70])
-
-
-def main():
-    log_file = sys.argv[-1]
-    root_dir = sys.argv[-2]
-    log_tmp = log_file+'.tmp'
-
-    c = CremCheck(root_dir, log_tmp)
-    c.check()
-
-    date = datetime.datetime.now().strftime("%x-%X").replace('/','_')
-    shutil.copy(log_tmp,log_file+'-'+date+'.log')
-    shutil.move(log_tmp,log_file)
-
-if __name__ == '__main__':
-    main()
-
diff --git a/scripts/old/process-waveform-cgi.py b/scripts/old/process-waveform-cgi.py
deleted file mode 100755 (executable)
index 317878b..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2009-2010 Guillaume Pellerin <yomguy@parisson.com>
-
-# This file is part of TimeSide.
-
-# TimeSide is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-
-# TimeSide is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with TimeSide.  If not, see <http://www.gnu.org/licenses/>.
-
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-
-# for python2.5
-
-version = '0.5'
-
-
-import os
-import sys
-import time
-import shutil
-import datetime
-import timeside
-
-# soon with python2.6
-#from multiprocessing import Process
-
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-import cgi
-fs = cgi.FieldStorage()
-
-
-orig_media_dir = '/mnt/awdiomusic/musicbase'
-project_dir = '/mnt/awdio'
-log_file = project_dir + '/logs/process.log'
-sys.path.append('/home/awdio/apps/telemeta-awdio')
-
-
-class GrapherScheme:
-
-    def __init__(self):
-        self.color = 255
-        self.color_scheme = {
-            'waveform': [ # Four (R,G,B) tuples for three main color channels for the spectral centroid method
-                        (self.color,self.color,self.color)
-#                        (0, 0, 0), (0, 0, 0), (0, 0, 0), (0,0,0)
-                        ],
-            'spectrogram': [
-                        (0, 0, 0), (58/4,68/4,65/4), (80/2,100/2,153/2), (90,180,100), (224,224,44), (255,60,30), (255,255,255)
-                        ]}
-
-        # Grapher id
-        self.id = 'waveform_awdio'
-
-        # Width of the image
-        self.width = 1800
-
-        # Height of the image
-        self.height = 233
-
-        # Background color
-        self.bg_color = None
-
-        # Force computation. By default, the class doesn't overwrite existing image files.
-        self.force = False
-        
-        # Nb of threads
-        # FIXME: memory leak for > 1 !
-        self.threads = 1
-
-      
-class TelemetaPreprocessImport(object):
-
-    def __init__(self, root_dir, dest_dir, log_file):
-       from telemeta.cache import TelemetaCache as Cache
-       from telemeta.util.logger import Logger
-       self.media_item_dir = 'items'
-        self.root_dir = root_dir + 'items'
-        self.dest_dir = dest_dir
-        self.threads = 1
-        self.logger = Logger(log_file)
-        self.counter = 0
-        self.force = 0
-        self.cache = Cache(self.dest_dir)
-
-        self.scheme = GrapherScheme()
-        self.width = self.scheme.width
-        self.height = self.scheme.height
-        self.bg_color = self.scheme.bg_color
-        self.color_scheme = self.scheme.color_scheme
-        self.force = self.scheme.force
-        self.threads = self.scheme.threads
-        self.logger = Logger(log_file)
-        self.counter = 0
-        self.collection_name = 'awdio'
-        self.collection = self.set_collection(self.collection_name)
-        
-        self.analyzers = timeside.core.processors(timeside.api.IAnalyzer)
-        self.grapher = timeside.grapher.WaveformAwdio(width=self.width, 
-                                                         height=self.height, 
-                                                         bg_color=self.bg_color, 
-                                                         color_scheme=self.color_scheme)
-        
-
-    def set_collection(self, collection_name):
-        import telemeta.models
-        collections = telemeta.models.media.MediaCollection.objects.filter(code=collection_name)
-        if not collections:
-            c = telemeta.models.media.MediaCollection(code=collection_name)
-            c.title = collection_name
-            c.save()
-            msg = 'added'
-            self.logger.logger.info(collection_name, msg)
-            collection = c
-        else:
-            collection = collections[0]
-        return collection
-
-    def process(self):
-       import telemeta.models
-       keys = fs.keys()
-       if keys[0] == 'file':
-           filename = fs['file'].value
-           media_orig = orig_media_dir + os.sep + filename
-           media = self.root_dir + os.sep + filename
-           
-           if not os.path.exists(media):
-               shutil.copy(media_orig, media)
-               os.system('chmod 644 ' + media)
-            
-            name, ext = os.path.splitext(filename)
-            size = str(self.width) + '_' + str(self.height)
-            image_name = name + '.' + self.scheme.id + '.' + size + '.png'
-            image = self.dest_dir + os.sep + image_name
-            xml = name + '.xml'
-            
-            if not self.cache.exists(image_name) or not self.cache.exists(xml):
-                mess = 'Processing ' + media
-                self.logger.logger.info(mess)
-           
-               print "Content-type: text/plain\n"
-               print mess
-               decoder  = timeside.decoder.FileDecoder(media)
-               pipe = decoder | self.grapher
-               analyzers = []
-               analyzers_sub = []
-               for analyzer in self.analyzers:
-                   subpipe = analyzer()
-                   analyzers_sub.append(subpipe)
-                   pipe = pipe | subpipe
-               pipe.run()
-               
-               mess = 'Rendering ' + image
-               self.logger.logger.info(mess)
-               self.grapher.render(output=image)
-               
-               mess = 'Frames / Pixel = ' + str(self.grapher.graph.samples_per_pixel)
-               self.logger.logger.info(mess)
-               
-               for analyzer in analyzers_sub:
-                   value = analyzer.result()
-                   if analyzer.id() == 'duration':
-                       value = datetime.timedelta(0,value)
-                   analyzers.append({'name':analyzer.name(),
-                           'id':analyzer.id(),
-                           'unit':analyzer.unit(),
-                           'value':str(value)})
-               
-               self.cache.write_analyzer_xml(analyzers, xml)
-               
-               item = telemeta.models.media.MediaItem.objects.filter(code=name)
-                           
-               if not item:
-                   item = telemeta.models.media.MediaItem(collection=self.collection, code=name)
-                   item.title = name
-                   item.file = self.media_item_dir + os.sep + filename
-                   item.save()
-                   msg = 'added item : ' + filename
-                   self.logger.logger.info(self.collection_name, msg)
-
-               pipe = 0
-               decoder = 0
-               
-               print "OK"
-               
-               #except:
-                   #pipe = 0
-                   #decoder = 0
-                   #mess = 'Could NOT process : ' + media
-                   #self.logger.logger.error(mess)
-                   #print mess
-                   
-           else:
-               mess = "Nothing to do with file : " + media
-               self.logger.logger.info(mess)
-               print "Content-type: text/plain\n"
-               print mess
-       
-       else:
-           print "Content-type: text/plain\n"
-           print "No file given !"
-       
-
-if __name__ == '__main__':
-    sys.path.append(project_dir)
-    import settings
-    setup_environ(settings)
-    media_dir = settings.MEDIA_ROOT
-    data_dir = settings.TELEMETA_DATA_CACHE_DIR
-    t = TelemetaPreprocessImport(media_dir, data_dir, log_file)
-    t.process()
diff --git a/scripts/old/telemeta-backup.py b/scripts/old/telemeta-backup.py
deleted file mode 100755 (executable)
index d93a83f..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2007 Samalyse SARL
-
-# This file is part of Telemeta.
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#
-# Author: Olivier Guilyardi <olivier@samalyse.com>
-
-import os
-import sys
-import time
-from django.core.management import setup_environ
-
-def print_usage(toolname):
-    print "Usage: " + toolname + " <project_dir> <backup_dir>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  backup_dir: the destination backup folder (must exist)"
-
-def write_readme(dest_dir, coll_num):
-    readme = open(dest_dir + "/" + "README", "w")
-    timestr = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
-    readme.write("Telemeta Backup\n\n")
-    readme.write("- date: " + timestr + "\n")
-    readme.write("- number of collections: " + str(coll_num) + "\n\n")
-    readme.close()
-
-def backup(dest_dir):
-    from telemeta.models import MediaCollection
-    from telemeta.backup import CollectionSerializer
-
-    collections = MediaCollection.objects.order_by('id')
-    count = collections.count()
-
-    print "Writing README file..",
-    write_readme(dest_dir, count)
-    print "Done."
-
-    i = 0
-    for collection in collections:
-        if i % 100 == 0:
-            set_dir = dest_dir + ("/collections-%d-%d" % (i+1, i+100))
-            os.mkdir(set_dir)
-        i += 1
-        print "Processing collection %d/%d (%d%%) with id: %s.. " \
-            % (i, count, i*100/count, collection.id),
-        sys.stdout.flush()
-        serializer = CollectionSerializer(collection)
-        serializer.backup(set_dir)
-        print "Done"
-
-def run():
-    if len(sys.argv) != 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[1]
-        backup_dir = sys.argv[2]
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        backup(backup_dir)
-
-if __name__ == '__main__':
-    run()
diff --git a/scripts/old/telemeta-crem-import-alt_ids.py b/scripts/old/telemeta-crem-import-alt_ids.py
deleted file mode 100755 (executable)
index 84c673d..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2011 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import sys
-import xlrd
-import logging
-import datetime
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
-class Logger:
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def write_info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def write_error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class TelemetaAltIdsImport:
-
-    def __init__(self, xls_file, log_file):
-        self.logger = Logger(log_file)
-        self.xls = xls_file
-        self.row = 0
-
-    def alt_ids_import(self):
-        from telemeta.models import MediaCollection
-        self.book = xlrd.open_workbook(self.xls)
-        self.sheet = self.book.sheet_by_index(0)
-        self.length = len(self.sheet.col(0))-1
-        
-        while True:
-            ids = []
-            self.row += 1
-            row = self.sheet.row(self.row)
-            if self.row == self.length:
-                break
-            collection_id = row[0].value
-            cell_alt_id = row[1]
-            if cell_alt_id.ctype == 1:
-                for i in range(1,len(row)):
-                    cell_alt_id = row[i]
-                    if cell_alt_id.ctype == 1:
-                        ids.append(cell_alt_id.value)
-                alt_ids = ' '.join(ids)
-                try:
-                    collection = MediaCollection.objects.get(old_code=collection_id)
-                    collection.alt_ids = alt_ids
-                    collection.save()
-                    print self.row, collection_id, alt_ids
-                except:
-                    msg = 'No collection found for this id'
-                    self.logger.write_error(collection_id, msg)
-                    continue
-            
-                
-def print_usage(tool_name):
-    print "Usage: "+tool_name+" <project_dir> <xls_file> <log_file>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  xls_file: the excel file containing all collection alt_ids"
-
-def run():
-    if len(sys.argv) < 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[-3]
-        xls_file = sys.argv[-2]
-        log_file = sys.argv[-1]
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        t = TelemetaAltIdsImport(xls_file, log_file)
-        t.alt_ids_import()
-
-if __name__ == '__main__':
-    run()
diff --git a/scripts/old/telemeta-crem-import-test.py b/scripts/old/telemeta-crem-import-test.py
deleted file mode 100755 (executable)
index 021e9a2..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2010 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import sys
-import csv
-import logging
-import datetime
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
-
-class Logger:
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class TelemetaWavImport:
-
-    def __init__(self, source_dir, log_file, pattern, domain):
-        from django.contrib.auth.models import User
-        self.logger = Logger(log_file)
-        self.source_dir = source_dir
-        self.collections = os.listdir(self.source_dir)
-        self.pattern = pattern
-        self.user = User.objects.filter(username='admin')[0]
-        self.domain = domain
-
-    def write_file(self, item, wav_file, overwrite=False):
-        filename = wav_file.split(os.sep)[-1]
-        if os.path.exists(wav_file):
-            if not item.file or overwrite:
-#                f = open(wav_file, 'r')
-#                file_content = ContentFile(f.read())
-#                item.file.save(filename, file_content)
-#                f.close()
-                item.save()
-                item.set_revision(self.user)
-            else:
-                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
-                self.logger.error('item', msg)
-        else:
-            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
-            self.logger.error('item', msg)
-            
-    def wav_import(self):
-        from telemeta.models import MediaItem,  MediaCollection
-        
-        collections = []
-        for collection in self.collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_files = os.listdir(collection_dir)
-            
-            
-            if not '/.' in collection_dir and self.pattern in collection_dir:
-                collection_name = collection.split(os.sep)[-1]
-                collections.append(collection_name)
-                c = MediaCollection.objects.filter(code=collection_name)
-                
-                if not c and collection + '.csv' in collection_files:
-                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
-                    self.logger.error(collection, msg)
-                    sys.exit(msg)
-                elif not c:
-                    msg = 'collection NON présente dans la base de données, CREATION '
-                    self.logger.info(collection, msg)
-                    c = MediaCollection(code=collection_name)
-                    c.save()
-                    c.set_revision(self.user)
-                else:
-                    msg = 'collection présente dans la base de données, SELECTION'
-                    self.logger.info(collection, msg)
-                    
-        for collection in collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_name = collection
-            collection_files = os.listdir(collection_dir)
-            msg = '************************ ' + collection + ' ******************************'
-            self.logger.info(collection, msg[:70])
-            overwrite = True
-            csv_file = ''
-            rows = {}
-            
-            if collection + '.csv' in collection_files:
-                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
-                csv_data = csv.reader(open(csv_file), delimiter=';')
-                for row in csv_data:
-                    rows[row[1].strip()] = row[0].strip()
-                msg = collection + ' import du fichier CSV de la collection'
-                self.logger.info(collection, msg[:70])
-            else:
-                msg = collection + ' pas de fichier CSV dans la collection'
-                self.logger.info(collection, msg[:70])
-            
-            c = MediaCollection.objects.filter(code=collection_name)
-            if not c:
-                c = MediaCollection(code=collection_name)
-                c.save()
-                msg = ' collection NON présente dans la BDD, CREATION '
-                self.logger.info(c.code, msg)
-            else:
-                c = c[0]
-                msg = ' id = '+str(c.id)
-                self.logger.info(c.code, msg)
-            
-            audio_files = []
-            for file in collection_files:
-                ext = ['WAV', 'wav']
-                if file.split('.')[-1] in ext:
-                    audio_files.append(file)
-            
-            audio_files.sort()
-            nb_items = c.items.count()
-            counter = 0
-            
-            for file in audio_files:
-                code = file.split('.')[0]
-                wav_file = self.source_dir + os.sep + collection + os.sep + file
-                
-                if len(audio_files) <= nb_items:
-                    items = MediaItem.objects.filter(code=code)
-                    
-                    old_ref = ''
-                    if code in rows and not items:
-                        old_ref = rows[code]
-                        items = MediaItem.objects.filter(old_code=old_ref)
-                        
-                    if items:
-                        item = items[0]
-                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
-                        self.logger.info('item', msg)
-                        item.code = code
-                        item.save()
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-                    
-                    self.write_file(item, wav_file, overwrite)
-                    
-                elif nb_items == 1 and len(audio_files) > 1:
-                    if counter == 0:
-                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
-                        self.logger.info('item', msg)
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-                        self.write_file(item, wav_file, overwrite)
-                
-                elif nb_items > 1 and nb_items < len(audio_files):
-                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
-                    self.logger.info('item', msg)
-
-                counter += 1
-        
-        msg = 'Liste des URLs des collections importées :'
-        self.logger.info('INFO', msg)
-        for collection in collections:
-            msg = 'http://'+self.domain+'/collections/'+collection
-            self.logger.info(collection, msg)
-            
-        
-def print_usage(tool_name):
-    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  source_dir: the directory containing the wav files to include"
-    print "  pattern: a pattern to match the collection names"
-    print "  log_file: a log file to write logs"
-    print "  domain: root domain for collections"
-
-def run():
-    if len(sys.argv) < 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[-5]
-        source_dir = sys.argv[-4]
-        pattern = sys.argv[-3]
-        log_file = sys.argv[-2]
-        url = sys.argv[-1]
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        t = TelemetaWavImport(source_dir, log_file, pattern, url)
-        t.wav_import()
-
-if __name__ == '__main__':
-    run()
diff --git a/scripts/old/telemeta-crem-import.py b/scripts/old/telemeta-crem-import.py
deleted file mode 100755 (executable)
index dcdf5c1..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2010 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import sys
-import csv
-import logging
-import datetime
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
-
-class Logger:
-
-    def __init__(self, file):
-        self.logger = logging.getLogger('myapp')
-        self.hdlr = logging.FileHandler(file)
-        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
-        self.hdlr.setFormatter(self.formatter)
-        self.logger.addHandler(self.hdlr)
-        self.logger.setLevel(logging.INFO)
-
-    def info(self, prefix, message):
-        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))
-
-    def error(self, prefix, message):
-        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
-class TelemetaWavImport:
-
-    def __init__(self, source_dir, log_file, pattern, domain):
-        from django.contrib.auth.models import User
-        self.logger = Logger(log_file)
-        self.source_dir = source_dir
-        self.collections = os.listdir(self.source_dir)
-        self.pattern = pattern
-        self.user = User.objects.filter(username='admin')[0]
-        self.domain = domain
-
-    def write_file(self, item, wav_file, overwrite=False):
-        filename = wav_file.split(os.sep)[-1]
-        if os.path.exists(wav_file):
-            if not item.file or overwrite:
-                f = open(wav_file, 'r')
-                file_content = ContentFile(f.read())
-                item.file.save(filename, file_content)
-                f.close()
-                item.save()
-                item.set_revision(self.user)
-            else:
-                msg = item.code + ' : fichier ' + item.file.name + ' deja inscrit dans la base de donnees !'
-                self.logger.error('item', msg)
-        else:
-            msg = item.code + ' : fichier audio ' + filename + ' inexistant dans le dossier !'
-            self.logger.error('item', msg)
-
-    def wav_import(self):
-        from telemeta.models import MediaItem,  MediaCollection
-
-        collections = []
-        for collection in self.collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_files = os.listdir(collection_dir)
-
-
-            if not '/.' in collection_dir and self.pattern in collection_dir:
-                collection_name = collection.split(os.sep)[-1]
-                collections.append(collection_name)
-                c = MediaCollection.objects.filter(code=collection_name)
-
-                if not c and collection + '.csv' in collection_files:
-                    msg = collection + ' collection NON présente dans la base de données, SORTIE '
-                    self.logger.error(collection, msg)
-                    sys.exit(msg)
-                elif not c:
-                    msg = 'collection NON présente dans la base de données, CREATION '
-                    self.logger.info(collection, msg)
-                    c = MediaCollection(code=collection_name, title=collection_name)
-                    c.save()
-                    c.set_revision(self.user)
-                else:
-                    msg = 'collection présente dans la base de données, SELECTION'
-                    self.logger.info(collection, msg)
-
-        for collection in collections:
-            collection_dir = self.source_dir + os.sep + collection
-            collection_name = collection
-            collection_files = os.listdir(collection_dir)
-            msg = '************************ ' + collection + ' ******************************'
-            self.logger.info(collection, msg[:70])
-            overwrite = True
-            csv_file = ''
-            rows = {}
-
-            if collection + '.csv' in collection_files:
-                csv_file = self.source_dir + os.sep + collection + os.sep + collection + '.csv'
-                csv_data = csv.reader(open(csv_file), delimiter=';')
-                for row in csv_data:
-                    rows[row[1].strip()] = row[0].strip()
-                msg = collection + ' import du fichier CSV de la collection'
-                self.logger.info(collection, msg[:70])
-            else:
-                msg = collection + ' pas de fichier CSV dans la collection'
-                self.logger.info(collection, msg[:70])
-
-            c = MediaCollection.objects.filter(code=collection_name)
-            if not c:
-                c = MediaCollection(code=collection_name)
-                c.save()
-                msg = ' collection NON présente dans la BDD, CREATION '
-                self.logger.info(c.code, msg)
-            else:
-                c = c[0]
-                msg = ' id = '+str(c.id)
-                self.logger.info(c.code, msg)
-
-            audio_files = []
-            for file in collection_files:
-                ext = ['WAV', 'wav']
-                if file.split('.')[-1] in ext and file[0] != '.':
-                    audio_files.append(file)
-
-            audio_files.sort()
-            nb_items = c.items.count()
-            counter = 0
-
-            for file in audio_files:
-                code = file.split('.')[0]
-                wav_file = self.source_dir + os.sep + collection + os.sep + file
-
-                if len(audio_files) <= nb_items:
-                    items = MediaItem.objects.filter(code=code)
-
-                    old_ref = ''
-                    if code in rows and not items:
-                        old_ref = rows[code]
-                        items = MediaItem.objects.filter(old_code=old_ref)
-
-                    if items:
-                        item = items[0]
-                        msg = code + ' : ' + item.old_code + ' : Cas 1 ou 2 : id = ' + str(item.id)
-                        self.logger.info('item', msg)
-                        item.code = code
-                        item.save()
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : ' + old_ref + ' : Cas 1 ou 2 : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-
-                    self.write_file(item, wav_file, overwrite)
-
-                elif nb_items == 1 and len(audio_files) > 1:
-                    if counter == 0:
-                        msg = code + ' : Cas 3a : item n°01 présent dans la base de données, PASSE'
-                        self.logger.info('item', msg)
-                    else:
-                        item = MediaItem(code=code, collection=c)
-                        msg = code + ' : Cas 3a : item NON présent dans la base de données, CREATION'
-                        self.logger.info('item', msg)
-                        self.write_file(item, wav_file, overwrite)
-
-                elif nb_items > 1 and nb_items < len(audio_files):
-                    msg = code + ' : Cas 3b : nb items < nb de fichiers audio, PAS de creation'
-                    self.logger.info('item', msg)
-
-                counter += 1
-
-        msg = 'Liste des URLs des collections importées :'
-        self.logger.info('INFO', msg)
-        for collection in collections:
-            msg = 'http://'+self.domain+'/archives/collections/'+collection
-            self.logger.info(collection, msg)
-
-
-def print_usage(tool_name):
-    print "Usage: "+tool_name+" <project_dir> <source_dir> <pattern> <log_file> <domain>"
-    print "  project_dir: the directory of the Django project which hosts Telemeta"
-    print "  source_dir: the directory containing the wav files to include"
-    print "  pattern: a pattern to match the collection names"
-    print "  log_file: a log file to write logs"
-    print "  domain: root domain for collections"
-
-def run():
-    if len(sys.argv) < 3:
-        print_usage(os.path.basename(sys.argv[0]))
-        sys.exit(1)
-    else:
-        project_dir = sys.argv[-5]
-        source_dir = sys.argv[-4]
-        pattern = sys.argv[-3]
-        log_file = sys.argv[-2]
-        url = sys.argv[-1]
-        sys.path.append(project_dir)
-        import settings
-        setup_environ(settings)
-        t = TelemetaWavImport(source_dir, log_file, pattern, url)
-        t.wav_import()
-
-if __name__ == '__main__':
-    run()
diff --git a/scripts/old/telemeta-media-link.py b/scripts/old/telemeta-media-link.py
deleted file mode 100755 (executable)
index 118fe95..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2010 Guillaume Pellerin
-# All rights reserved.
-#
-# This software is licensed as described in the file COPYING, which
-# you should have received as part of this distribution. The terms
-# are also available at http://svn.parisson.org/telemeta/TelemetaLicense.
-#
-# Author: Guillaume Pellerin <yomguy@parisson.com>
-#
-
-import os
-import re
-import sys
-import logging
-import datetime
-import timeside
-from django.core.management import setup_environ
-from django.core.files.base import ContentFile
-
# Map decoder metadata tag names (keys) to Telemeta MediaItem field names
# (values).  Several source tags all funnel into the free-form 'comment'
# field; media_import() appends those instead of overwriting.
mapping = {
             'title': 'title',
             'album': 'collection',
             'date': 'recorded_from_date',
             'artist': 'author',
             'track-number': 'track',
             'encoder': 'comment',
             'genre': 'generic_style',
             'audio-codec': 'comment',
             'container-format': 'comment',
             }
-
class Logger:
    """Thin wrapper around the stdlib logging module that appends
    INFO/ERROR records to a single log file."""

    def __init__(self, file):
        # NOTE(review): the logger name 'myapp' is process-global, so
        # creating several Logger instances would stack duplicate handlers.
        self.logger = logging.getLogger('myapp')
        self.hdlr = logging.FileHandler(file)
        self.formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        self.hdlr.setFormatter(self.formatter)
        self.logger.addHandler(self.hdlr)
        self.logger.setLevel(logging.INFO)

    def write_info(self, prefix, message):
        # message is expected to be a UTF-8 byte string (Python 2 str).
        self.logger.info(' ' + prefix + ' : ' + message.decode('utf8'))

    def write_error(self, prefix, message):
        self.logger.error(prefix + ' : ' + message.decode('utf8'))
-
-
class TelemetaMediaImport:
    """Walk a media directory tree and register each not-yet-known file as
    a Telemeta MediaItem, grouping items into collections derived from the
    'album' metadata tag."""

    def __init__(self, media_dir, log_file):
        self.logger = Logger(log_file)
        self.media_dir = media_dir
        self.medias = self.get_medias()
    
    def get_medias(self):
        """Return [{'root': dir, 'file': name}, ...] for every file under
        media_dir, skipping any path containing a hidden component ('/.')."""
        os.chdir(self.media_dir)
        file_list = []
        for root, dirs, files in os.walk('.'):
            for file in files:
                path = root + os.sep + file
                if not os.sep+'.' in path:
                    file_list.append({'root': root, 'file': file})
        return file_list
        
    def set_collection(self, collection_name):
        """Fetch or create the MediaCollection whose code is derived from
        collection_name (spaces and non-word characters become '_')."""
        if not collection_name:
            # NOTE(review): 'Unkown' is a typo for 'Unknown' but is a runtime
            # value -- existing DB rows may already carry this code.
            collection_name = 'Unkown'
        code = collection_name.replace(' ','_')
        code = re.sub(r'\W+', '_', code)
        from telemeta.models.media import MediaCollection
        collections = MediaCollection.objects.filter(code=code)
        if not collections:
            collection = MediaCollection(code=code,title=collection_name)
            collection.save()
            msg = 'created'
            self.logger.write_info('collection ' + collection_name, msg)
        else:
            collection = collections[0]
        return collection
        
    def media_import(self):
        """Import every file whose basename is not already a MediaItem code.

        Metadata is read through timeside's decoder and copied onto item
        fields via the module-level 'mapping' dict.  Any failure on a single
        file or tag is swallowed (bare except) and the walk continues.
        """
        from telemeta.models.media import MediaItem
        for media in self.medias:
            path = media['root'] + os.sep + media['file']
            print 'checking ' + path
            filename,  ext = os.path.splitext(media['file'])
            item = MediaItem.objects.filter(code=filename)
            if not item:
                print 'importing ' + path
                decoder = timeside.decoder.FileDecoder(path)
                try:
                    metadata = decoder.metadata()
                    print metadata
                    collection = self.set_collection(metadata['album'])
                    item = MediaItem(collection=collection)
                    item.code = re.sub(r'\W+', '_', metadata['title'])
                    for tag in mapping.keys():
                        try:
                            if tag == 'date':
                                # assumes the date tag looks like '<word> DD/MM/YYYY'
                                # and reorders it to ISO YYYY-MM-DD -- TODO confirm
                                date = metadata[tag].split(' ')[1].split('/')
                                metadata[tag] = date[2]+'-'+date[1]+'-'+date[0]    
                            if mapping[tag] == 'comment':
                                # comment-type tags are appended, not overwritten
                                item[mapping[tag]] = item[mapping[tag]] + '\n' + metadata[tag]
                            else:
                                item[mapping[tag]] = metadata[tag]
                        except:
                            continue
                    item.file = path
                    item.save()
                    msg = 'added item : ' + path
                    self.logger.write_info(collection.code, msg)
                except:
                    continue
-                
-
def run():
    """Entry point: bootstrap Django from the given project dir, then
    import everything under settings.MEDIA_ROOT.

    CLI: telemeta-media-link.py <project_dir> <log_file>
    """
    project_dir = sys.argv[-2]
    log_file = sys.argv[-1]
    # Make the Django project importable, then configure settings.
    sys.path.append(project_dir)
    import settings
    setup_environ(settings)
    media_dir = settings.MEDIA_ROOT
    t = TelemetaMediaImport(media_dir, log_file)
    t.media_import()

if __name__ == '__main__':
    run()
diff --git a/scripts/sql/backup_db.sh b/scripts/sql/backup_db.sh
deleted file mode 100755 (executable)
index cfb5cff..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
#!/bin/bash
# Back up the 'telemeta' MySQL database (host 'db') to a timestamped,
# gzip-compressed dump under /srv/backup/.
#
# Requires: MYSQL_ROOT_PASSWORD set in the environment.
set -euo pipefail

readonly DIR=/srv/backup/
# Use '_' as the time separator directly: the old %T format produced ':'
# characters that then needed a non-portable perl-rename pass.
NOW=$(date +"%Y-%m-%d-%H_%M_%S")
readonly FILE=telemeta-$NOW.sql.gz

echo "Backuping: $FILE"

# pipefail makes a mysqldump failure abort the script instead of silently
# leaving a truncated .gz behind and printing "Done!".
mysqldump -hdb -uroot -p"$MYSQL_ROOT_PASSWORD" telemeta | gzip > "$DIR$FILE"

echo "Done!"
diff --git a/scripts/sql/convert_myisam_to_innodb.sql b/scripts/sql/convert_myisam_to_innodb.sql
deleted file mode 100644 (file)
index 91e36d5..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-- Convert every legacy Telemeta table from MyISAM to InnoDB
-- (transactions, row-level locking, foreign-key support).
-- Safe to re-run: altering a table already on InnoDB is a no-op.
ALTER TABLE `vernacular_styles` ENGINE=InnoDB;
ALTER TABLE `users` ENGINE=InnoDB;
ALTER TABLE `thumbnail_kvstore` ENGINE=InnoDB;
ALTER TABLE `telemeta_media_transcoded` ENGINE=InnoDB;
ALTER TABLE `tape_width` ENGINE=InnoDB;
ALTER TABLE `tape_wheel_diameter` ENGINE=InnoDB;
ALTER TABLE `tape_vendor` ENGINE=InnoDB;
ALTER TABLE `tape_speed` ENGINE=InnoDB;
ALTER TABLE `tape_length` ENGINE=InnoDB;
ALTER TABLE `south_migrationhistory` ENGINE=InnoDB;
ALTER TABLE `search_criteria` ENGINE=InnoDB;
ALTER TABLE `searches_criteria` ENGINE=InnoDB;
ALTER TABLE `searches` ENGINE=InnoDB;
ALTER TABLE `rights` ENGINE=InnoDB;
ALTER TABLE `revisions` ENGINE=InnoDB;
ALTER TABLE `recording_contexts` ENGINE=InnoDB;
ALTER TABLE `publishing_status` ENGINE=InnoDB;
ALTER TABLE `publisher_collections` ENGINE=InnoDB;
ALTER TABLE `publishers` ENGINE=InnoDB;
ALTER TABLE `profiles` ENGINE=InnoDB;
ALTER TABLE `playlist_resources` ENGINE=InnoDB;
ALTER TABLE `playlists` ENGINE=InnoDB;
ALTER TABLE `physical_formats` ENGINE=InnoDB;
ALTER TABLE `original_format` ENGINE=InnoDB;
ALTER TABLE `original_channel_number` ENGINE=InnoDB;
ALTER TABLE `organization` ENGINE=InnoDB;
ALTER TABLE `metadata_writers` ENGINE=InnoDB;
ALTER TABLE `metadata_authors` ENGINE=InnoDB;
ALTER TABLE `media_type` ENGINE=InnoDB;
ALTER TABLE `media_transcoding` ENGINE=InnoDB;
ALTER TABLE `media_status` ENGINE=InnoDB;
ALTER TABLE `media_parts` ENGINE=InnoDB;
ALTER TABLE `media_markers` ENGINE=InnoDB;
ALTER TABLE `media_item_related` ENGINE=InnoDB;
ALTER TABLE `media_item_performances` ENGINE=InnoDB;
ALTER TABLE `media_item_keywords` ENGINE=InnoDB;
ALTER TABLE `media_item_identifier` ENGINE=InnoDB;
ALTER TABLE `media_items` ENGINE=InnoDB;
ALTER TABLE `media_formats` ENGINE=InnoDB;
ALTER TABLE `media_fonds_related` ENGINE=InnoDB;
ALTER TABLE `media_fonds_children` ENGINE=InnoDB;
ALTER TABLE `media_fonds` ENGINE=InnoDB;
ALTER TABLE `media_corpus_related` ENGINE=InnoDB;
ALTER TABLE `media_corpus_children` ENGINE=InnoDB;
ALTER TABLE `media_corpus` ENGINE=InnoDB;
ALTER TABLE `media_collection_related` ENGINE=InnoDB;
ALTER TABLE `media_collection_identifier` ENGINE=InnoDB;
ALTER TABLE `media_collections` ENGINE=InnoDB;
ALTER TABLE `media_analysis` ENGINE=InnoDB;
ALTER TABLE `location_types` ENGINE=InnoDB;
ALTER TABLE `location_relations` ENGINE=InnoDB;
ALTER TABLE `location_aliases` ENGINE=InnoDB;
ALTER TABLE `locations` ENGINE=InnoDB;
ALTER TABLE `legal_rights` ENGINE=InnoDB;
ALTER TABLE `languages` ENGINE=InnoDB;
ALTER TABLE `jqchat_room` ENGINE=InnoDB;
ALTER TABLE `jqchat_message` ENGINE=InnoDB;
ALTER TABLE `ipauth_range` ENGINE=InnoDB;
ALTER TABLE `instrument_relations` ENGINE=InnoDB;
ALTER TABLE `instrument_alias_relations` ENGINE=InnoDB;
ALTER TABLE `instrument_aliases` ENGINE=InnoDB;
ALTER TABLE `instruments` ENGINE=InnoDB;
ALTER TABLE `identifier_type` ENGINE=InnoDB;
ALTER TABLE `googletools_siteverificationcode` ENGINE=InnoDB;
ALTER TABLE `googletools_analyticscode` ENGINE=InnoDB;
ALTER TABLE `generic_styles` ENGINE=InnoDB;
ALTER TABLE `ethnic_group_aliases` ENGINE=InnoDB;
ALTER TABLE `ethnic_groups` ENGINE=InnoDB;
ALTER TABLE `django_site` ENGINE=InnoDB;
ALTER TABLE `django_session` ENGINE=InnoDB;
ALTER TABLE `django_content_type` ENGINE=InnoDB;
ALTER TABLE `django_admin_log` ENGINE=InnoDB;
ALTER TABLE `copy_type` ENGINE=InnoDB;
ALTER TABLE `context_keywords` ENGINE=InnoDB;
ALTER TABLE `auth_user_user_permissions` ENGINE=InnoDB;
ALTER TABLE `auth_user_groups` ENGINE=InnoDB;
ALTER TABLE `auth_user` ENGINE=InnoDB;
ALTER TABLE `auth_permission` ENGINE=InnoDB;
ALTER TABLE `auth_message` ENGINE=InnoDB;
ALTER TABLE `auth_group_permissions` ENGINE=InnoDB;
ALTER TABLE `auth_group` ENGINE=InnoDB;
ALTER TABLE `ad_conversions` ENGINE=InnoDB;
ALTER TABLE `acquisition_modes` ENGINE=InnoDB;
diff --git a/scripts/sql/drop_timeside.sql b/scripts/sql/drop_timeside.sql
deleted file mode 100644 (file)
index adecbcd..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-- Remove all legacy TimeSide tables, children before parents so foreign
-- keys never dangle.  IF EXISTS makes the script safe to re-run.
-- NOTE(review): MySQL DDL auto-commits, so BEGIN/COMMIT here are only
-- cosmetic -- each DROP is committed individually.
BEGIN;

DROP TABLE IF EXISTS `timeside_results`;
DROP TABLE IF EXISTS `timeside_selections_items`;
DROP TABLE IF EXISTS `timeside_items`;
DROP TABLE IF EXISTS `timeside_tasks`;
DROP TABLE IF EXISTS `timeside_selections_selections`;
DROP TABLE IF EXISTS `timeside_selections`;
DROP TABLE IF EXISTS `timeside_experiences_presets`;
DROP TABLE IF EXISTS `timeside_presets`;
DROP TABLE IF EXISTS `timeside_experiences_experiences`;
DROP TABLE IF EXISTS `timeside_experiences`;
DROP TABLE IF EXISTS `timeside_processors`;

COMMIT;
diff --git a/scripts/sql/fix_contentttypes.sql b/scripts/sql/fix_contentttypes.sql
deleted file mode 100644 (file)
index 0d2c76b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-alter table django_content_type drop column name;
diff --git a/scripts/sql/import_sql.sh b/scripts/sql/import_sql.sh
deleted file mode 100755 (executable)
index 10c0ca4..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
#!/bin/bash
# Start a local MySQL server, import a gzipped SQL dump into it, then
# shut the server down again.
#
# Usage: import_sql.sh <username> <password> <database> </path/to/dump.sql.gz>
set -u

if [[ $# -ne 4 ]]; then
    # usage errors go to stderr
    echo "Usage: $0 <username> <password> <database> </path/to/sql_file.sql.gz>" >&2
    exit 1
fi

echo "=> Starting MySQL Server"
/usr/bin/mysqld_safe > /dev/null 2>&1 &
PID=$!   # NB: this is the mysqld_safe wrapper's PID, not mysqld's

# Poll until the server accepts connections.  Deliberately no timeout:
# mirrors the original behaviour of waiting indefinitely for startup.
RET=1
while [[ "$RET" -ne 0 ]]; do
    echo "=> Waiting for confirmation of MySQL service startup"
    sleep 5
    mysql -u"$1" -p"$2" -e "status" > /dev/null 2>&1
    RET=$?
done

echo "   Started with PID ${PID}"

echo "=> Importing SQL file"
gunzip -c "$4" | mysql -u"$1" -p"$2" "$3"

echo "=> Stopping MySQL Server"
mysqladmin -u"$1" -p"$2" shutdown

echo "=> Done!"
diff --git a/scripts/sql/restore_db.sh b/scripts/sql/restore_db.sh
deleted file mode 100755 (executable)
index 8a8fd6b..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
#!/bin/bash
# Restore the most recent SQL backup from /srv/backup/ into the
# 'telemeta' database on host 'db'.
#
# Requires: MYSQL_ROOT_PASSWORD set in the environment.
set -euo pipefail

readonly DIR=/srv/backup/

# Most recently modified dump.  '|| true' keeps set -e from aborting
# before we can print a proper error when no backup exists.
FILE=$(ls -t "$DIR"/*.sql* 2>/dev/null | head -1 || true)

if [[ -z "$FILE" ]]; then
    echo "No backup file found in $DIR" >&2
    exit 1
fi

echo "Restoring: $FILE"

if [[ "$FILE" == *.gz ]]; then
    gunzip < "$FILE" | mysql -hdb -uroot -p"$MYSQL_ROOT_PASSWORD" telemeta
else
    mysql -hdb -uroot -p"$MYSQL_ROOT_PASSWORD" telemeta < "$FILE"
fi

echo "Done!"
diff --git a/scripts/transcode/create_thumbs.py b/scripts/transcode/create_thumbs.py
deleted file mode 100755 (executable)
index dc3fd20..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
#!/usr/bin/python
# Generate a PNG thumbnail (frame at 00:00:05) for every .webm file under
# the given root directory, via ffmpeg.
#
# Usage: create_thumbs.py [--force] <root_dir>
# Destinations already listed in thumbs.log are skipped unless --force.

import os, sys, string
import logging

class Logger:
    """A logging object"""

    def __init__(self, file):
        self.logger = logging.getLogger('myapp')
        self.hdlr = logging.FileHandler(file)
        self.formatter = logging.Formatter('%(message)s')
        self.hdlr.setFormatter(self.formatter)
        self.logger.addHandler(self.hdlr)
        self.logger.setLevel(logging.INFO)

log_file = 'thumbs.log'
logger = Logger(log_file)
root_dir = sys.argv[-1]
args = sys.argv[1:-1]
source_format = 'webm'
done = []
# timestamp of the frame extracted as the thumbnail
preview_tc = '00:00:05'

# Load previously generated destinations (one absolute path per line,
# trailing newline stripped).
if os.path.exists(log_file):
    f = open(log_file, 'r')
    for line in f.readlines():
        done.append(line[:-1])
    f.close()

for root, dirs, files in os.walk(root_dir):
    for file in files:
        path = os.path.abspath(root + os.sep + file)
        name, ext = os.path.splitext(file)
        if ext[1:] == source_format:
            dest = os.path.abspath(root + os.sep + name + '.png')
            if not dest in done or '--force' in args:
                # NOTE(review): paths are interpolated unquoted into a shell
                # command -- breaks (or worse) on spaces/shell metacharacters.
                command = 'ffmpeg -ss '+ preview_tc + ' -i ' + path + '  -y ' + dest
                os.system(command)
                # record success so the file is skipped on the next run
                logger.logger.info(dest)

print "DONE!"
diff --git a/scripts/transcode/remux_fix_media.py b/scripts/transcode/remux_fix_media.py
deleted file mode 100755 (executable)
index 39cfd9f..0000000
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/python
-
-import os, sys, psutil
-import datetime
-from ebml.utils.ebml_data import *
-
class FixCheckMedia(object):
    """Scan a directory tree for non-empty .webm files, remux each so its
    first cluster starts at t=0, and regenerate the sibling .mp3 files.

    Per-directory marker files (webm.fixed / webm.tofix and mp3.fixed /
    mp3.tofix) record what has already been processed, making repeated
    runs incremental.
    """

    def __init__(self, dir, tmp_dir):
        self.dir = dir
        self.tmp_dir = tmp_dir
        if not os.path.exists(self.tmp_dir):
            os.makedirs(self.tmp_dir)

    def process(self):
        """Walk self.dir and fix any webm/mp3 pair not yet marked fixed."""
        # marker filenames dropped in each media directory
        webm_fixed_log = 'webm.fixed'
        webm_tofix_log = 'webm.tofix'
        mp3_fixed_log = 'mp3.fixed'
        mp3_tofix_log = 'mp3.tofix'

        for root, dirs, files in os.walk(self.dir):
            for filename in files:
                source = root + os.sep + filename
                name = os.path.splitext(filename)[0]
                ext = os.path.splitext(filename)[1][1:]

                # only act on non-empty webm sources
                if ext == 'webm' and os.path.getsize(source):
                    dir_files = os.listdir(root)

                    if not webm_fixed_log in dir_files:
                        print source
                        self.fix_webm(source)
                        # drop the 'fixed' marker, clear any 'tofix' marker
                        f = open(root + os.sep + webm_fixed_log, 'w')
                        f.close()
                        if os.path.exists(root + os.sep + webm_tofix_log):
                            os.remove(root + os.sep + webm_tofix_log)

                    if mp3_tofix_log in dir_files or not mp3_fixed_log in dir_files:
                        # regenerate every mp3 sitting next to the webm
                        for file in dir_files:
                            dest_ext = os.path.splitext(file)[1][1:]
                            if dest_ext == 'mp3':
                                dest = root + os.sep + file
                                print dest
                                self.fix_mp3(source, dest)
                                f = open(root + os.sep + mp3_fixed_log, 'w')
                                f.close()
                                if os.path.exists(root + os.sep + mp3_tofix_log):
                                    os.remove(root + os.sep + mp3_tofix_log)
                                #break


    def hard_fix_webm(self, path):
        """Full re-encode fallback (VP8/Vorbis), replacing path in place.

        Failures are silently ignored (bare except).
        """
        try:
            # NOTE(review): the trailing space in tmp_file doubles as the
            # argument separator in the 'mv' command below -- fragile, and
            # tmp_dir is assumed to already end with a path separator.
            tmp_file = self.tmp_dir + 'out.webm '
            command = 'ffmpeg -loglevel 0 -i "'+ path + '" -vcodec libvpx -vb 500k -acodec libvorbis -aq 7 -f webm -y "' + tmp_file + '" > /dev/null'
            print command
            os.system(command)
            command = 'mv '  + tmp_file + path
            os.system(command)
        except:
            pass


    def fix_webm(self, path):
        """Remux path (stream copy) and re-cut it so playback starts at the
        first cluster; failures are silently ignored (bare except)."""
        try:
            tmp_file = self.tmp_dir + 'out.webm'
            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "' + path + '" -vcodec copy -acodec copy -f webm -y "' + tmp_file + '" > /dev/null'
            print command
            os.system(command)
            # offset of the first EBML cluster, used as the new start time
            ebml_obj = EBMLData(tmp_file)
            offset = ebml_obj.get_first_cluster_seconds()
            command = '/usr/local/bin/ffmpeg -loglevel 0 -ss ' + str(offset) + ' -i "' + tmp_file + '" -vcodec copy -acodec copy -f webm -y "' + path + '" > /dev/null'
            print command
            os.system(command)
        except:
            pass

    def fix_mp3(self, source, path):
        """Re-extract the audio track of source into path as mp3; failures
        are silently ignored (bare except)."""
        try:
            command = '/usr/local/bin/ffmpeg -loglevel 0 -i "'+ source + '" -vn -aq 6 -y "' + path + '" > /dev/null'
            print command
            os.system(command)
        except:
            pass
-
def get_pids(name, args=None):
    """Return the PIDs of processes whose name equals `name`, optionally
    restricted to those whose command line contains `args`.

    NOTE(review): relies on the old psutil attribute-style API
    (proc.cmdline / proc.name as properties); modern psutil made these
    methods -- verify the installed psutil version.
    """
    pids = []
    for proc in psutil.process_iter():
        if proc.cmdline:
            if name == proc.name:
                if args:
                    if args in proc.cmdline:
                        pids.append(proc.pid)
                else:
                    pids.append(proc.pid)
    return pids
-
# CLI: remux_fix_media.py <media_dir> <tmp_dir>
dir = sys.argv[-2]
tmp_dir = sys.argv[-1]

# Poor man's lock: only proceed if no other python2.6 process is already
# running this very script file.
path =  os.path.abspath(__file__)
pids = get_pids('python2.6',args=path)

print datetime.datetime.now()
if len(pids) <= 1:
    print 'starting process...'
    f = FixCheckMedia(dir, tmp_dir)
    f.process()
    print 'process finished.\n'
else:
    print 'already started !\n'
diff --git a/scripts/transcode/transcode.py b/scripts/transcode/transcode.py
deleted file mode 100755 (executable)
index efaa113..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/python
-
-import os, sys, string
-import logging
-
-
class Logger:
    """File-backed INFO-level logger with timestamped messages."""

    def __init__(self, file):
        handler = logging.FileHandler(file)
        handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
        log = logging.getLogger('myapp')
        log.addHandler(handler)
        log.setLevel(logging.INFO)
        # keep the same public attributes as before
        self.logger = log
        self.hdlr = handler
        self.formatter = handler.formatter
-
-
class TelemetaTranscode(object):
    """Walk a directory tree and transcode every webm/mp4 source into the
    formats listed in dest_formats, logging each ffmpeg command.

    Flags accepted anywhere in args:
      --force    re-transcode even if a destination already exists
      --dry-run  print commands instead of executing them
    """

    # ffmpeg encoder threads per job
    threads = 4
    # input extensions that trigger transcoding
    source_formats = ['webm', 'mp4']
    # destination extension -> ffmpeg argument string
    dest_formats = {
                   'mp3' : '-vn -acodec libmp3lame -aq 6',
                   'ogg' : '-vn -acodec libvorbis -aq 6',
                   'mp4' : '-vcodec libx264 -threads ' + str(threads) + \
                           ' -c:v libx264 -crf 17 -maxrate 1100k -bufsize 1835k -acodec libfaac -ab 96k',
                   'png' : '',
                   'webm' : '-vcodec libvpx -threads ' + str(threads) + \
                           ' -c:v libvpx -crf 17 -b:v 1100k',
                  }


    def __init__(self, args):
        # last two args are positional: <root_dir> <log_file>
        self.args = args
        self.log_file = args[-1]
        self.root_dir = args[-2]
        self.logger = Logger(self.log_file)


    def get_ext_in_dir(self, extension, root):
        """Return True if any file directly under root has this extension."""
        files = os.listdir(root)
        exts = []
        for f in files:
            name, ext = os.path.splitext(f)
            ext = ext[1:]
            if not ext in exts:
                exts.append(ext)
        return extension in exts

    def run(self):
        """Transcode all sources under root_dir to all missing formats."""
        for root, dirs, files in os.walk(self.root_dir):
            for file in files:
                path = os.path.abspath(root + os.sep + file)
                name, ext = os.path.splitext(file)
                ext = ext[1:]
                if ext in self.source_formats:
                    for format, ffmpeg_args in self.dest_formats.iteritems():
                        local_file = name + '.' + format
                        dest = os.path.abspath(root + os.sep + local_file)
                        local_files = os.listdir(root)
                        # skip if the exact file -- or any file of that
                        # format -- already exists, unless --force
                        if not (local_file in local_files or self.get_ext_in_dir(format, root)) or '--force' in self.args:
                            if ext == 'webm' and format == 'ogg':
                                # webm audio is already Vorbis: copy, don't re-encode
                                ffmpeg_args = '-vn -acodec copy'
                            command = 'ffmpeg -loglevel 0 -i "' + path + '" ' + ffmpeg_args + ' -y "' + dest + '"'
                            self.logger.logger.info(command)
                            if not '--dry-run' in self.args:
                                os.system(command)
                            else:
                                print command
-
-
# CLI: transcode.py [--force] [--dry-run] <root_dir> <log_file>
if __name__ == '__main__':
    t = TelemetaTranscode(sys.argv[1:])
    t.run()
diff --git a/scripts/upgrade.sh b/scripts/upgrade.sh
deleted file mode 100755 (executable)
index 1167382..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-docker-compose run app /srv/scripts/upgrade_from_1.6_to_1.7.sh