mirror of https://github.com/fxsjy/jieba.git
merge the new file
commit c01680c6a8
@@ -0,0 +1,33 @@
#encoding=utf-8
from whoosh.analysis import RegexAnalyzer,LowercaseFilter,StopFilter
from whoosh.analysis import Tokenizer,Token

import jieba
import re

STOP_WORDS = frozenset(('a', 'an', 'and', 'are', 'as', 'at', 'be', 'by', 'can',
                        'for', 'from', 'have', 'if', 'in', 'is', 'it', 'may',
                        'not', 'of', 'on', 'or', 'tbd', 'that', 'the', 'this',
                        'to', 'us', 'we', 'when', 'will', 'with', 'yet',
                        'you', 'your',u'的',u'了',u'和'))

accepted_chars = re.compile(ur"[\u4E00-\u9FA5]+")

class ChineseTokenizer(Tokenizer):
    def __call__(self,text,**kargs):
        words = jieba.tokenize(text,mode="search")
        token = Token()
        for (w,start_pos,stop_pos) in words:
            if not accepted_chars.match(w):
                if len(w)>1:
                    pass
                else:
                    continue
            token.original = token.text = w
            token.pos = start_pos
            token.startchar = start_pos
            token.endchar = stop_pos
            yield token

def ChineseAnalyzer(stoplist=STOP_WORDS,minsize=1):
    return ChineseTokenizer() | LowercaseFilter() | StopFilter(stoplist=stoplist,minsize=minsize)
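A quick way to see what this analyzer produces is to call it directly on a unicode string: Whoosh analyzers are callable and yield Token objects. Below is a minimal sketch (Python 2, to match the code above); the sample sentence is arbitrary, and it assumes jieba and whoosh are installed and that ChineseAnalyzer is exposed as jieba.analyse.ChineseAnalyzer, as the test script in the next hunk expects.

#encoding=utf-8
from jieba.analyse import ChineseAnalyzer

analyzer = ChineseAnalyzer()
# Each yielded token carries the segmented text (lowercased, stop words
# removed) plus start/end character offsets set by ChineseTokenizer.
for t in analyzer(u"我的朋友在交换机公司工作"):
    print t.text, t.startchar, t.endchar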
@@ -0,0 +1,38 @@
# -*- coding: UTF-8 -*-
import sys
sys.path.append("../")
from whoosh.index import create_in
from whoosh.fields import *
from whoosh.qparser import QueryParser

from jieba.analyse import ChineseAnalyzer

analyzer = ChineseAnalyzer()

schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT(stored=True, analyzer=analyzer))
ix = create_in("tmp", schema)
writer = ix.writer()

file_name = sys.argv[1]

with open(file_name,"rb") as inf:
    i=0
    for line in inf:
        i+=1
        writer.add_document(
            title=u"line"+str(i),
            path=u"/a",
            content=line.decode('gbk','ignore')
        )
writer.commit()

searcher = ix.searcher()
parser = QueryParser("content", schema=ix.schema)

for keyword in (u"水果小姐",u"你",u"first",u"中文",u"交换机",u"交换"):
    print "result of ",keyword
    q = parser.parse(keyword)
    results = searcher.search(q)
    for hit in results:
        print hit.highlights("content")
    print "="*10
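The script above rebuilds the index under the "tmp" directory on every run. To query an index that was already built by it, a rough sketch using whoosh.index.open_dir under the same assumptions (whoosh installed, "tmp" previously created by the script, one of the script's own keywords used as the query):

# -*- coding: UTF-8 -*-
import sys
sys.path.append("../")
# jieba.analyse must stay importable so the schema stored in the index,
# which references ChineseTokenizer, can be loaded again.
import jieba.analyse
from whoosh.index import open_dir
from whoosh.qparser import QueryParser

ix = open_dir("tmp")
with ix.searcher() as searcher:
    parser = QueryParser("content", schema=ix.schema)
    q = parser.parse(u"中文")
    for hit in searcher.search(q):
        print hit.highlights("content")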