git.parisson.com Git - telemeta.git/commitdiff
Merge branch 'dev' into crem2
author Guillaume Pellerin <yomguy@parisson.com>
Mon, 20 Jan 2014 22:08:33 +0000 (23:08 +0100)
committer Guillaume Pellerin <yomguy@parisson.com>
Mon, 20 Jan 2014 22:08:33 +0000 (23:08 +0100)
Conflicts:
example/sandbox/settings.py
setup.py
telemeta/views/collection.py
telemeta/views/core.py
telemeta/views/item.py

example/sandbox/settings.py
telemeta/templates/telemeta/base.html
telemeta/urls.py
telemeta/views/collection.py
telemeta/views/core.py
telemeta/views/item.py

index f50a60fc602a6728648a2a7ee57bf0672c7ad877,24d6552bdd5b28693d170e2277d709c7c75207b2..5bd692c6ae5926b84563b014b48a24edf66a5b5a
@@@ -162,5 -165,4 +165,3 @@@ LOGIN_REDIRECT_URL = reverse_lazy('tele
  
  EMAIL_HOST = 'localhost'
  DEFAULT_FROM_EMAIL = 'webmaster@parisson.com'
--
- FILE_UPLOAD_TEMP_DIR = '/tmp'
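The merge drops the explicit FILE_UPLOAD_TEMP_DIR override. A minimal sketch of the fallback this relies on: when the setting is unset (Django's default is None), upload handlers use the standard system temp directory, so the hard-coded '/tmp' was redundant on most Linux hosts.

import tempfile
from django.conf import settings

# With FILE_UPLOAD_TEMP_DIR removed, Django's temporary upload files fall
# back to the system temp directory.
upload_tmp = getattr(settings, 'FILE_UPLOAD_TEMP_DIR', None) or tempfile.gettempdir()
# e.g. '/tmp' on most Linux systems, matching the removed hard-coded value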
Simple merge
index bcbf6a1577cae22d9b597a886ecadfabcb720608,f91b992f7782c96395e80779d13fa17a2c7e1be4..f17310a3938920fc8d9ea6d8ea3d7c978ce1c83b
@@@ -360,6 -360,10 +360,10 @@@ urlpatterns = patterns(''
              'document_root': settings.TELEMETA_CACHE_DIR,}),
  
      url(r'^', include('jqchat.urls')),
- )
  
+ )
  
 -    url(r'^__debug__/', include(debug_toolbar.urls)),)
+ if settings.DEBUG:
+     import debug_toolbar
+     urlpatterns += patterns('',
++    url(r'^__debug__/', include(debug_toolbar.urls)),)
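Resolved, the tail of urls.py mounts django-debug-toolbar only in development. A minimal sketch of the merged result, assuming the Django 1.x patterns() syntax used elsewhere in this file:

from django.conf import settings
from django.conf.urls import patterns, include, url

# urlpatterns is the list built by the patterns('', ...) call above; the
# toolbar URLs are appended only when DEBUG is on, so production deployments
# never import debug_toolbar at all.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )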
index c40ccb12b862a1bfde5970a0147dd7d3a5fb2e77,a0aa61cbdb4ee739e1954865ea3adc906f7383c1..1ba08754f862c0b57b689923e626d3044bd4f25c
@@@ -167,7 -167,40 +167,37 @@@ class CollectionPackageView(View)
      def get_object(self):
          return MediaCollection.objects.get(public_id=self.kwargs['public_id'])
  
-     def get(self, request, *args, **kwargs):
+     def get_stream(self, request, *args, **kwargs):
+         """
+         Stream a ZIP file of collection data
+         without loading the whole file into memory.
+         Based on ZipStream
+         """
+         from telemeta.views import MarkerView
+         from telemeta.backup import CollectionSerializer
+         import json
+         import zipstream
+         
+         z = zipstream.ZipFile()        
+         collection = self.get_object()   # public_id comes from the URL kwargs
+         z.write(collection.code)
+         
+         for item in collection.items.all():
+             z.write(item.file.path)
+         try:
+             from django.http import StreamingHttpResponse
+             response = StreamingHttpResponse(z, content_type='application/zip')
+         except ImportError:
+             # StreamingHttpResponse appeared in Django 1.5; fall back otherwise
+             response = HttpResponse(z, content_type='application/zip')
+         response['Content-Disposition'] = "attachment; filename=%s.%s" % \
+                                              (collection.code, 'zip')
+         return response
+     
+     @method_decorator(login_required)
+     def dispatch(self, *args, **kwargs):
+         return super(CollectionPackageView, self).dispatch(*args, **kwargs)
 -
 -
 -
          """
          Create a ZIP file on disk and transmit it in chunks of 8KB,
          without loading the whole file into memory. A similar approach can
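Outside the diff, the streaming path condenses to the sketch below, assuming the python-zipstream package (its ZipFile is an iterable that yields the archive in compressed chunks); stream_collection_zip is a hypothetical helper, not Telemeta API:

import zipstream
from django.http import StreamingHttpResponse

def stream_collection_zip(paths, archive_name):
    # Queue files for on-the-fly compression; nothing is read yet.
    z = zipstream.ZipFile()
    for path in paths:  # e.g. item.file.path for each item in the collection
        z.write(path)
    # Iterating z yields the ZIP in chunks, so the full archive never
    # sits in memory.
    response = StreamingHttpResponse(z, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename=%s.zip' % archive_name
    return response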
Simple merge
index c7e9c49ddafd493b79797056f6b482c7350b6cf5,5a9bb215513320cecd8804209b89c9d01a7f0e04..f2f0cbe6c308fe7eafbd2f4982737e9da5d1129f
@@@ -347,7 -347,8 +348,8 @@@ class ItemView(object)
  
      def item_analyze(self, item):
          analyses = MediaItemAnalysis.objects.filter(item=item)
 -        
+         mime_type = ''
 +
          if analyses:
              for analysis in analyses:
                  if not item.approx_duration and analysis.analyzer_id == 'duration':
                                               analyzer_id='duration', unit='s',
                                               value=unicode(datetime.timedelta(0,decoder.input_duration)))
                  analysis.save()
+                 
                  for analyzer in analyzers_sub:
-                     value = analyzer.result()
-                     analysis = MediaItemAnalysis(item=item, name=analyzer.name(),
-                                                  analyzer_id=analyzer.id(),
-                                                  unit=analyzer.unit(), value=str(value))
-                     analysis.save()
+                     for key in analyzer.results.keys():
+                         result = analyzer.results[key]
+                         value = result.data_object.value
+                         if value.shape[0] == 1:
+                             value = value[0]
+                         analysis = MediaItemAnalysis(item=item, name=result.name,
+                                                      analyzer_id=result.id,
+                                                      unit=result.unit, value=unicode(value))
+                         analysis.save()
  
 -#                FIXME: parse tags on first load
 +                analyses = MediaItemAnalysis.objects.filter(item=item)
 +
 +#                TODO: parse tags on first load
  #                tags = decoder.tags
  
 -        return mime_type
 +        return analyses
  
      def item_analyze_xml(self, request, public_id):
          item = MediaItem.objects.get(public_id=public_id)
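The rewritten item_analyze loop flattens each TimeSide analyzer's results container into one MediaItemAnalysis row per result, unwrapping single-element numpy arrays first. A minimal sketch of that unwrapping, with stand-in Result/DataObject classes and invented sample data (only the attribute names follow the diff):

import numpy as np

class DataObject(object):
    def __init__(self, value):
        self.value = value

class Result(object):
    # Mimics the TimeSide result objects seen in the diff.
    def __init__(self, id, name, unit, value):
        self.id, self.name, self.unit = id, name, unit
        self.data_object = DataObject(value)

results = {'level.rms': Result('level.rms', 'RMS level', 'dBFS',
                               np.array([-23.5]))}  # hypothetical sample data

for key in results.keys():
    result = results[key]
    value = result.data_object.value
    if value.shape[0] == 1:  # unwrap 1-element arrays, as in the diff
        value = value[0]
    # The view persists this as MediaItemAnalysis(item=item, ...).
    print("%s = %s %s" % (result.name, value, result.unit))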