# Mr Developer
.mr.developer.cfg
+
+.idea/*
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+ <component name="VcsDirectoryMappings">
+ <mapping directory="$PROJECT_DIR$" vcs="Git" />
+ </component>
+</project>
\ No newline at end of file
# If this is True, all tasks will be executed locally by blocking until the task returns.
CELERY_ALWAYS_EAGER=False
-REINDEX=False
+REINDEX=True
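
For context, a minimal sketch of what CELERY_ALWAYS_EAGER controls (not part of this change; the app setup and the reindex_item task name are hypothetical): with the flag set, .delay() blocks and runs the task in-process, returning an EagerResult, so no worker or broker is needed.

    from celery import Celery

    app = Celery('demo')
    app.conf.CELERY_ALWAYS_EAGER = True   # same flag as above, set programmatically here

    @app.task
    def reindex_item(item_id):
        # placeholder body; a real task would rebuild the search entry for the item
        return item_id

    result = reindex_item.delay(42)   # executes synchronously when eager
    print(result.get())               # -> 42, no worker required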
-collector unavailable in search
-
-playlist -> dashboard: recording period
-
not enough items in search
-autocompletion on instruments/collector (closed lists)
-
-recording date: min 1900 but earlier dates appear DONE ???
-
-number of items that have the instrument in the instruments list + list
-
-unpublished/published filter for items: go through the collection then its code (I: unpublished, E: published)
-
filters only appear one time out of two
+number of items per enum + list
+
DONE:
after deleting a collection from the playlist: leave the last opened one open by default
sorting in advanced/simple search
page 643 error
+
+number of items that have the instrument in the instruments list + list
+
+playlist -> dashboard: recording period
+
+collector unavailable in search
+
+recording date: min 1900 but earlier dates appear
+
+autocompletion on instruments/collector (closed lists)
+
+CHECK IN PROD:
+
+unpublished/published filter for items: go through the collection then its code (I: unpublished, E: published)
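
A possible shape for that filter, sketched with the Django ORM; it assumes MediaItem keeps its collection foreign key and that the collection code carries the I/E marker, so the exact lookup pattern is an assumption, not taken from this change:

    from telemeta.models import MediaItem

    # hedged sketch: adjust the lookup to the real code scheme (I = unpublished, E = published)
    unpublished_items = MediaItem.objects.filter(collection__code__contains='_I_')
    published_items = MediaItem.objects.filter(collection__code__contains='_E_')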
list_all_year = []
list_collect = MediaCollection.objects.all()
for collect in list_collect:
- if collect.recorded_from_year != '0' and not collect.recorded_from_year in list_all_year:
+ if collect.recorded_from_year != 0 and collect.recorded_from_year not in list_all_year:
list_all_year.append(collect.recorded_from_year)
- if collect.recorded_to_year != '0' and not collect.recorded_to_year in list_all_year:
+ if collect.recorded_to_year != 0 and collect.recorded_to_year not in list_all_year:
list_all_year.append(collect.recorded_to_year)
list_all_year.sort()
- if len(list_all_year) >= 2:
- min_year = list_all_year[len(list_all_year) - 1]
- for year in list_all_year:
- if year != 0:
- if year < min_year:
- min_year = year
- list_all_year = range(min_year, date.today().year + 1)
list_year = []
list_year.append(('', '----'))
for year in list_all_year:
list_all_year = []
list_collect = MediaCollection.objects.all()
for collect in list_collect:
- if collect.year_published != '0' and not collect.year_published in list_all_year:
+ if collect.year_published != 0 and collect.year_published not in list_all_year:
list_all_year.append(collect.year_published)
list_all_year.sort()
- if len(list_all_year) >= 2:
- min_year = list_all_year[len(list_all_year) - 1]
- for year in list_all_year:
- if year != 0:
- if year < min_year:
- min_year = year
- list_all_year = range(min_year, date.today().year + 1)
list_year = []
list_year.append((0, '----'))
for year in list_all_year:
return self.no_query_found()
if self.cleaned_data.get('q'):
- sqs = sqs.filter(title__exact=self.cleaned_data['q'])
+ sqs = sqs.filter(title__contains=self.cleaned_data['q'])
if self.cleaned_data.get('code'):
sqs = sqs.filter(code__contains=self.cleaned_data['code'])
from haystack import indexes
from telemeta.models import *
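+# CharField subclass whose field_type maps to the custom 'instrument' mapping registered in CustomElasticBackend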
+class InstrumentField(indexes.CharField):
+ field_type = 'instrument'
class MediaItemIndex(indexes.SearchIndex, indexes.Indexable):
#advanced search
title = indexes.CharField(model_attr='title')
- title_auto = indexes.NgramField(model_attr='title')
code = indexes.CharField(model_attr='code', default='')
location_principal = indexes.CharField(null=True, boost=1.05)
location_relation = indexes.CharField()
ethnic_group = indexes.CharField(model_attr='ethnic_group', default='')
- instruments = indexes.NgramField(default='')
- collectors = indexes.NgramField(model_attr='collector', default='')
+ instruments = InstrumentField(default='')
+ collectors = indexes.CharField(model_attr='collector', default='')
recorded_from_date = indexes.DateField(model_attr='recorded_from_date', null=True)
recorded_to_date = indexes.DateField(model_attr='recorded_to_date', null=True)
year_published = indexes.IntegerField(model_attr='collection__year_published', default=0)
instruments.append(material.instrument.name)
if material.alias is not None:
instruments.append(material.alias.name)
- return u"".join(' ' + instru for instru in instruments)
+ return u"".join('|' + instru for instru in instruments)
def prepare_collectors(self, obj):
collectors = []
#advanced search
title = indexes.CharField(model_attr='title')
- title_auto = indexes.NgramField(model_attr='title')
code = indexes.CharField(model_attr='code', default='')
location_principal = indexes.CharField(default='', boost=1.05)
location_relation = indexes.CharField()
ethnic_group = indexes.CharField(default='')
- instruments = indexes.NgramField(default='')
- collectors = indexes.NgramField(model_attr='collector', default='')
+ instruments = InstrumentField(default='')
+ collectors = indexes.CharField(model_attr='collector', default='')
recorded_from_date = indexes.DateField(model_attr='recorded_from_year', null=True)
recorded_to_date = indexes.DateField(model_attr='recorded_to_year', null=True)
year_published = indexes.IntegerField(model_attr='year_published', default=0)
if material.alias and not material.alias in instruments:
instruments.append(material.alias.name)
- return u"".join(' ' + instru for instru in instruments)
+ return u"".join('|' + instru for instru in instruments)
def prepare_recorded_from_date(self, obj):
if obj.recorded_from_year != 0:
#content_auto = indexes.EdgeNgramField(model_attr='content')
#advanced search
- title = indexes.NgramField(model_attr='title')
- code = indexes.NgramField(model_attr='code', default='')
+ title = indexes.CharField(model_attr='title')
+ code = indexes.CharField(model_attr='code', default='')
#location_principal = indexes.CharField(default='', boost=1.05)
#location_relation = indexes.CharField()
#ethnic_group = indexes.CharField(default='')
#content_auto = indexes.EdgeNgramField(model_attr='content')
#advanced search
- title = indexes.NgramField(model_attr='title')
- code = indexes.NgramField(model_attr='code', default='')
+ title = indexes.CharField(model_attr='title')
+ code = indexes.CharField(model_attr='code', default='')
#location_principal = indexes.CharField(default='', boost=1.05)
#location_relation = indexes.CharField()
#ethnic_group = indexes.CharField(default='')
</script>
<script>
$(function(){
- $('#id_q[type="search"], #id_code, #id_location').each(function(){
+ $('#id_code, #id_instruments, #id_collectors').each(function(){
var self = this;
$(this).autocomplete({
source : function(requete, reponse) {
class CustomElasticBackend(ElasticsearchSearchBackend):
def setup(self):
- FIELD_MAPPINGS.get('ngram')['search_analyzer']='startspacelower'
+
+ DEFAULT_FIELD_MAPPING['analyzer']='trim_lower_analyzer'
+ FIELD_MAPPINGS['instrument']={'type':'string', 'analyzer':'pipe_analyzer', 'search_analyzer': 'trim_lower_analyzer'}
eb = super(CustomElasticBackend, self)
- eb.DEFAULT_SETTINGS.get('settings').get('analysis').get('analyzer')['startspacelower']={"type":"pattern", "pattern":"^\\s+", "filter": ["lowercase"]}
- ngram = eb.DEFAULT_SETTINGS.get('settings').get('analysis').get('analyzer').get('ngram_analyzer')
- ngram['tokenizer']='keyword'
- ngram.get('filter').insert(0, 'trim')
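+ # pipe_tokenizer: splits the '|'-joined instrument/collector strings back into whole-name tokens
+ # trim_lower_analyzer / pipe_analyzer: trim, lowercase and asciifold so matching ignores case and accents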
+ eb.DEFAULT_SETTINGS.get('settings').get('analysis').get('tokenizer')['pipe_tokenizer']=\
+ {'type': 'pattern', 'pattern': '\\|'}
+ eb.DEFAULT_SETTINGS.get('settings').get('analysis').get('analyzer')['trim_lower_analyzer']=\
+ {"type": "custom", "tokenizer": "keyword", "filter": ["trim", "lowercase", "asciifolding"]}
+ eb.DEFAULT_SETTINGS.get('settings').get('analysis').get('analyzer')['pipe_analyzer']=\
+ {'type':'custom', 'tokenizer': 'pipe_tokenizer', 'filter': ['trim', 'lowercase', 'asciifolding']}
eb.setup()
class CustomElasticEngine(ElasticsearchSearchEngine):
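
The custom engine only takes effect once Haystack is pointed at it; a minimal sketch of the settings entry, where the module path, URL and index name are assumptions rather than values taken from this change:

    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'telemeta.custom_search_backend.CustomElasticEngine',  # hypothetical module path
            'URL': 'http://127.0.0.1:9200/',
            'INDEX_NAME': 'telemeta',
        },
    }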
def autocomplete(request):
sqs = SearchQuerySet().load_all()
- print(type)
- if request.GET.get('attr', '') == "q":
- sqs = sqs.filter(title_auto__exact=request.GET.get('q', ''))[:10]
- suggestions = [result.title for result in sqs]
- elif request.GET.get('attr', '') == "location":
- sqs = sqs.filter(location_principal__contains=request.GET.get('q', ''))[:10]
- suggestions = [result.location_principal for result in sqs]
+ if request.GET.get('attr', '') == "instruments":
+ sqs = sqs.filter(instruments__contains=request.GET.get('q', ''))[:10]
+ instrus = [result.instruments for result in sqs]
+ suggestions = []
+ for chaine in instrus:
+ for word in chaine.split('|'):
+ if word != "" and escapeAccentAndLower(request.GET.get('q', '')) in escapeAccentAndLower(word):
+ suggestions.append(word)
elif request.GET.get('attr', '') == "code":
sqs = sqs.filter(code__contains=request.GET.get('q', ''))[:10]
suggestions = [result.code for result in sqs]
+
+ elif request.GET.get('attr', '') == "collectors":
+ sqs = sqs.filter(collectors__contains=request.GET.get('q', ''))[:10]
+ suggestions = [result.collectors for result in sqs]
else:
suggestions = []
'results': suggestions
})
return HttpResponse(the_data, content_type='application/json')
+
+import unicodedata
+
+def escapeAccentAndLower(chaine):
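+ # strip accents and lowercase so the substring comparison in autocomplete() is accent- and case-insensitive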
+ return unicodedata.normalize('NFD', chaine).encode('ascii', 'ignore').lower()
\ No newline at end of file