analyzer.py

# encoding=utf-8
from whoosh.analysis import RegexAnalyzer, LowercaseFilter, StopFilter
from whoosh.analysis import Tokenizer, Token
import jieba
import re

STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
                        'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
                        'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
                        'to', 'us', 'we', 'when', 'will', 'with', 'yet',
                        'you', 'your', u'的', u'了', u'和'))

# Matches runs of common CJK Unified Ideographs (U+4E00..U+9FA5).
accepted_chars = re.compile(r"[\u4E00-\u9FA5]+")


class ChineseTokenizer(Tokenizer):
    def __call__(self, text, **kargs):
        # jieba's "search" mode yields (word, start, end) tuples and also
        # cuts long words into shorter overlapping fragments for indexing.
        words = jieba.tokenize(text, mode="search")
        token = Token()
        for (w, start_pos, stop_pos) in words:
            # Skip single-character tokens that contain no Chinese characters
            # (punctuation, isolated Latin letters, whitespace, etc.).
            if not accepted_chars.match(w) and len(w) <= 1:
                continue
            token.original = token.text = w
            token.pos = start_pos
            token.startchar = start_pos
            token.endchar = stop_pos
            yield token


def ChineseAnalyzer(stoplist=STOP_WORDS, minsize=1):
    # Tokenize with jieba, then lowercase and remove stop words.
    return ChineseTokenizer() | LowercaseFilter() | StopFilter(stoplist=stoplist, minsize=minsize)
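
Below is a minimal usage sketch showing how this analyzer plugs into a Whoosh schema. It is not part of the original file: it assumes the module above is importable as analyzer, and the indexdir path, field names, and sample document are illustrative only.

# usage_example.py -- a sketch, not part of analyzer.py
import os
from whoosh.fields import Schema, TEXT, ID
from whoosh.index import create_in
from whoosh.qparser import QueryParser
from analyzer import ChineseAnalyzer  # assumes the file above is importable as analyzer.py

# Use the jieba-backed analyzer for the full-text field.
schema = Schema(path=ID(stored=True), content=TEXT(analyzer=ChineseAnalyzer()))

if not os.path.exists("indexdir"):  # illustrative index location
    os.mkdir("indexdir")
ix = create_in("indexdir", schema)

writer = ix.writer()
writer.add_document(path=u"/doc1", content=u"我们正在学习自然语言处理")
writer.commit()

# Queries against the field are tokenized with the same analyzer.
with ix.searcher() as searcher:
    query = QueryParser("content", ix.schema).parse(u"自然语言")
    for hit in searcher.search(query):
        print(hit["path"])

Note that ChineseAnalyzer defaults to minsize=1, so single-character Chinese words are kept rather than being dropped by the StopFilter.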