# -*- coding: utf-8 -*-
import re
import unicodedata
import os
import operator
import pickle
from django.db import models
from dynastie.models import Post
class Search:
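    """Simple full-text search index over blog posts.

    The index maps each lowercased, accent-stripped word to a list of
    [post id, weight] pairs and is pickled to <blog.src_path>/_search.db.
    """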
MINIMUM_LETTERS = 3
def __init__(self):
self.report = ''
self.tagreg = re.compile('<[^>]+>')
self.htmlreg = re.compile('&[^;]+;')
self.numreg = re.compile('[0-9]+')
self.pat = re.compile(r'\s+')
        self.replace_by_space = ('(', ')', '#', '\'', '{', '}', '[', ']',
                                 '-', '|', '\t', '\\', '_', '^', '=', '+', '$',
                                 '£', '%', 'µ', '*', ',', '?', ';', '.', '/',
                                 ':', '!', '§', '€', '²')
# Imported from generator.py
    def _addReport(self, string, color=''):
        # The report is accumulated as an HTML fragment. The exact markup is
        # an assumption, reconstructed from the color parameter and the
        # trailing newline of the original concatenations.
        if color != '':
            self.report = self.report + '<span style="color: ' + color + '">'
        self.report = self.report + self.__class__.__name__ + ' : '
        self.report = self.report + string
        if color != '':
            self.report = self.report + '</span>'
        self.report = self.report + '<br/>\n'
    def _addWarning(self, string):
        self._addReport(string, 'yellow')

    def _addError(self, string):
        self._addReport(string, 'red')
    def _saveDatabase(self, blog, hashtable):
        # Pickle the whole index; open in binary mode to match the binary
        # read performed in _loadDatabase()
        d = pickle.dumps(hashtable)
        f = open(blog.src_path + '/_search.db', 'wb')
        f.write(d)
        f.close()
def _loadDatabase(self, blog):
filename = blog.src_path + '/_search.db'
if not os.path.exists(filename):
            print 'No search index!'
return None
f = open(filename, 'rb')
hashtable = pickle.load(f)
f.close()
return hashtable
def _strip_accents(self, s):
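        # NFD-decompose the string and drop combining marks ('Mn' category)
        # so accented and unaccented spellings map to the same word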
return ''.join((c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn'))
def _remove_tag(self, content):
content = self.htmlreg.sub('', content)
content = self.numreg.sub('', content)
content = content.replace('\n', '')
content = content.replace('\r', '')
content = content.replace('"', '')
for c in self.replace_by_space:
content = content.replace(c, ' ')
content = self.tagreg.sub('', content)
content = self.pat.sub(' ', content)
return content
def _prepare_string(self, content):
content = self._remove_tag(content)
content = self._strip_accents(unicode(content, 'utf8'))
return content
    def _indexContent(self, hashtable, index, content, word_weight):
        content = self._prepare_string(content)
        wordlist = content.split(' ')
        for word in wordlist:
            if len(word) < self.MINIMUM_LETTERS:
                continue
            word = word.lower()
            if not word in hashtable:
                hashtable[word] = []
            # Each entry is a [post index, weight] pair: add to the weight if
            # the post is already referenced for this word, else insert a new pair
            entry = None
            for pair in hashtable[word]:
                if pair[0] == index:
                    entry = pair
                    break
            if entry is None:
                hashtable[word].insert(0, [index, word_weight])
            else:
                entry[1] = entry[1] + word_weight
    def _index_file(self, hashtable, filename, index):
        try:
            post = Post.objects.get(pk=index)
            # Only published posts are indexed
            if not post.published:
                return
        except Exception:
            # Unknown or invalid post id: nothing to index
            return
        f = open(filename, 'r')
        content = f.read()
        f.close()
        # Body words get a weight of 1, title words a higher weight of 5
        self._indexContent(hashtable, index, content, 1)
        self._indexContent(hashtable, index, post.title.encode('utf-8'), 5)
    def create_index(self, blog):
        hashtable = {}
        root = blog.src_path + '/_post'
        if os.path.exists(root):
            for post in os.listdir(root):
                # Skip entries whose name is not a post number
                if not post.isdigit(): continue
                self._index_file(hashtable, root + '/' + post, int(post))
        self._saveDatabase(blog, hashtable)
        self._addReport('Search index generated @ ' + blog.src_path + '/_search.db')
        return self.report
def _index_post(self, blog, post, saveDatabase=True):
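        # Add a single post to the existing index; if no index file exists
        # yet, fall back to a full rebuild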
hashtable = self._loadDatabase(blog)
filename = blog.src_path + '/_post/' + str(post)
if hashtable is None:
return self.create_index(blog)
self._index_file(hashtable, filename, int(post))
if saveDatabase:
self._saveDatabase(blog, hashtable)
    def _remove_post(self, blog, post, saveDatabase=True):
        hashtable = self._loadDatabase(blog)
        if hashtable is None: return
        post = int(post)
        for k, v in hashtable.items():
            # Drop every [index, weight] pair that refers to this post
            hashtable[k] = [t for t in v if t[0] != post]
        if saveDatabase:
            self._saveDatabase(blog, hashtable)
def index_post(self, blog, post):
return self._index_post(blog, post, True)
def delete_post(self, blog, post):
return self._remove_post(blog, post, True)
    def edit_post(self, blog, post, saveDatabase=True):
        # Re-index the post: drop its old entries, then index the new content
        self._remove_post(blog, post, False)
        self._index_post(blog, post, saveDatabase)
    def search(self, blog, string):
        hashtable = self._loadDatabase(blog)
        if hashtable is None:
            return []
        string = self._prepare_string(string.encode('utf-8'))
        wordlist = string.split(' ')
        res = {}
        for word in wordlist:
            if len(word) < Search.MINIMUM_LETTERS:
                continue
            word = word.lower()
            # Substring match: escape the word so regex metacharacters typed
            # by the user cannot break or widen the pattern
            reg = re.compile('.*' + re.escape(word) + '.*')
            for key in hashtable.keys():
                if reg.match(key):
                    for post in hashtable[key]:
                        if not post[0] in res:
                            res[post[0]] = post[1]
                        else:
                            res[post[0]] += post[1]
        # Return post indexes sorted by decreasing cumulated weight
        sorted_res = sorted(res.iteritems(), key=operator.itemgetter(1), reverse=True)
        return [r[0] for r in sorted_res]
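# Example usage (sketch: assumes an existing blog object exposing the
# src_path attribute used above, and Post rows matching the files in _post):
#
#   search = Search()
#   report = search.create_index(blog)        # writes <src_path>/_search.db
#   search.index_post(blog, 42)               # (re)index a single post id
#   post_ids = search.search(blog, u'some words')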