From 315a411e5200978f4338306a7c7195e500215cc6 Mon Sep 17 00:00:00 2001
From: fxsjy
Date: Thu, 13 Nov 2014 10:43:43 +0800
Subject: [PATCH 1/3] version update

---
 Changelog                 | 7 +++++++
 jieba/analyse/__init__.py | 2 +-
 setup.py                  | 2 +-
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/Changelog b/Changelog
index 2d786f1..4eb1af0 100644
--- a/Changelog
+++ b/Changelog
@@ -1,3 +1,10 @@
+2014-11-13: version 0.35
+1) Improved the dump and load mechanism of the dictionary cache; by @gumblex
+2) Improved keyword extraction performance; by @gumblex
+3) Added a keyword extraction submodule based on the textrank algorithm; by @singlee
+4) Fixed a bug in the custom stopwords feature; by @walkskyer
+
+
 2014-10-20: version 0.34
 1. Improved performance: the dictionary structure changed from a Trie to a Prefix Set, reducing memory usage by 2/3, see: https://github.com/fxsjy/jieba/pull/187; by @gumblex
 2. Fixed a performance problem in the keyword extraction feature
diff --git a/jieba/analyse/__init__.py b/jieba/analyse/__init__.py
index cdfb855..94d0f49 100644
--- a/jieba/analyse/__init__.py
+++ b/jieba/analyse/__init__.py
@@ -54,7 +54,7 @@ def set_stop_words(stop_words_path):
     if not os.path.exists(abs_path):
         raise Exception("jieba: path does not exist: " + abs_path)
     content = open(abs_path,'rb').read().decode('utf-8')
-    lines = content.split('\n')
+    lines = content.replace("\r","").split('\n')
     for line in lines:
         STOP_WORDS.add(line)
diff --git a/setup.py b/setup.py
index d084544..3e6d860 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,6 @@
 from distutils.core import setup
 setup(name='jieba3k',
-      version='0.34',
+      version='0.35',
       description='Chinese Words Segementation Utilities',
       author='Sun, Junyi',
       author_email='ccnusjy@gmail.com',
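The set_stop_words change in PATCH 1/3 handles Windows line endings: a stop-words file saved with CRLF would otherwise yield entries carrying a trailing "\r" that never match any token. A minimal sketch of the difference, using hypothetical file content rather than anything from the repository:

# Hypothetical decoded content of a CRLF-terminated stop-words file.
content = "the\r\nof\r\nand"

print(content.split('\n'))                    # ['the\r', 'of\r', 'and'] -- stale '\r' kept
print(content.replace("\r", "").split('\n'))  # ['the', 'of', 'and']     -- clean entries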
From 447c1ded8cb54f9a43128119f201865898cc6de7 Mon Sep 17 00:00:00 2001
From: fxsjy
Date: Sat, 15 Nov 2014 13:44:30 +0800
Subject: [PATCH 2/3] fix problem for python3.2

---
 jieba/__init__.py          | 4 ++--
 jieba/finalseg/__init__.py | 2 +-
 jieba/posseg/__init__.py   | 10 +++++-----
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/jieba/__init__.py b/jieba/__init__.py
index e507111..37e2e62 100644
--- a/jieba/__init__.py
+++ b/jieba/__init__.py
@@ -242,9 +242,9 @@ def cut(sentence, cut_all=False, HMM=True):
     # \r\n|\s : whitespace characters. Will not be handled.
     if cut_all:
-        re_han, re_skip = re.compile(r"([\u4E00-\u9FA5]+)", re.U), re.compile(r"[^a-zA-Z0-9+#\n]", re.U)
+        re_han, re_skip = re.compile("([\u4E00-\u9FA5]+)", re.U), re.compile("[^a-zA-Z0-9+#\n]", re.U)
     else:
-        re_han, re_skip = re.compile(r"([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)", re.U), re.compile(r"(\r\n|\s)", re.U)
+        re_han, re_skip = re.compile("([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)", re.U), re.compile("(\r\n|\s)", re.U)
     blocks = re_han.split(sentence)
     if cut_all:
         cut_block = __cut_all
diff --git a/jieba/finalseg/__init__.py b/jieba/finalseg/__init__.py
index a7694fc..5e676ad 100644
--- a/jieba/finalseg/__init__.py
+++ b/jieba/finalseg/__init__.py
@@ -88,7 +88,7 @@ def cut(sentence):
             sentence = sentence.decode('utf-8')
         except UnicodeDecodeError:
             sentence = sentence.decode('gbk', 'ignore')
-    re_han, re_skip = re.compile(r"([\u4E00-\u9FA5]+)"), re.compile(r"(\d+\.\d+|[a-zA-Z0-9]+)")
+    re_han, re_skip = re.compile("([\u4E00-\u9FA5]+)"), re.compile("(\d+\.\d+|[a-zA-Z0-9]+)")
     blocks = re_han.split(sentence)
     for blk in blocks:
         if re_han.match(blk):
diff --git a/jieba/posseg/__init__.py b/jieba/posseg/__init__.py
index 52e3382..865a07d 100644
--- a/jieba/posseg/__init__.py
+++ b/jieba/posseg/__init__.py
@@ -104,8 +104,8 @@ def __cut(sentence):
         yield pair(sentence[next:], pos_list[next][1])
 
 def __cut_detail(sentence):
-    re_han, re_skip = re.compile(r"([\u4E00-\u9FA5]+)"), re.compile(r"([\.0-9]+|[a-zA-Z0-9]+)")
-    re_eng, re_num = re.compile(r"[a-zA-Z0-9]+"), re.compile(r"[\.0-9]+")
+    re_han, re_skip = re.compile("([\u4E00-\u9FA5]+)"), re.compile("([\.0-9]+|[a-zA-Z0-9]+)")
+    re_eng, re_num = re.compile("[a-zA-Z0-9]+"), re.compile("[\.0-9]+")
     blocks = re_han.split(sentence)
     for blk in blocks:
         if re_han.match(blk):
@@ -129,7 +129,7 @@ def __cut_DAG_NO_HMM(sentence):
     x = 0
     N = len(sentence)
     buf = ''
-    re_eng = re.compile(r'[a-zA-Z0-9]',re.U)
+    re_eng = re.compile('[a-zA-Z0-9]',re.U)
     while x < N:
         y = route[x][1]+1
         l_word = sentence[x:y]
@@ -194,8 +194,8 @@ def __cut_internal(sentence, HMM=True):
         sentence = sentence.decode('utf-8')
     except UnicodeDecodeError:
         sentence = sentence.decode('gbk', 'ignore')
-    re_han, re_skip = re.compile(r"([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)"), re.compile(r"(\r\n|\s)")
-    re_eng, re_num = re.compile(r"[a-zA-Z0-9]+"), re.compile(r"[\.0-9]+")
+    re_han, re_skip = re.compile("([\u4E00-\u9FA5a-zA-Z0-9+#&\._]+)"), re.compile("(\r\n|\s)")
+    re_eng, re_num = re.compile("[a-zA-Z0-9]+"), re.compile("[\.0-9]+")
     blocks = re_han.split(sentence)
     if HMM:
         __cut_blk = __cut_DAG

From 5c487dbcbab279f6744e0f2ee126fbc5aa7b8e43 Mon Sep 17 00:00:00 2001
From: fxsjy
Date: Sat, 15 Nov 2014 13:46:27 +0800
Subject: [PATCH 3/3] update version

---
 Changelog | 3 +++
 setup.py  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/Changelog b/Changelog
index 4eb1af0..d1628f1 100644
--- a/Changelog
+++ b/Changelog
@@ -1,3 +1,6 @@
+2014-11-15: version 0.35.1
+1) Fixed a Python 3.2 compatibility problem
+
 2014-11-13: version 0.35
 1) Improved the dump and load mechanism of the dictionary cache; by @gumblex
 2) Improved keyword extraction performance; by @gumblex
diff --git a/setup.py b/setup.py
index 3e6d860..57a8421 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,6 @@
 from distutils.core import setup
 setup(name='jieba3k',
-      version='0.35',
+      version='0.35.1',
       description='Chinese Words Segementation Utilities',
       author='Sun, Junyi',
       author_email='ccnusjy@gmail.com',
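The raw-string removals in PATCH 2/3 work around a regex limitation in early Python 3: the re module only learned to interpret \uXXXX escapes inside patterns in Python 3.3, while Python 3.2 treats the unknown escape \u as a literal 'u', so a raw pattern such as r"[\u4E00-\u9FA5]+" silently matches stray ASCII characters instead of the intended CJK range. Dropping the r prefix makes Python's string parser expand the escapes into real characters before re ever sees the pattern, which behaves the same on every Python 3 version. A minimal sketch of the distinction (the sample string is hypothetical, not from the patch):

import re

# Non-raw literal: Python expands \u4E00 and \u9FA5 first, so re receives an
# actual CJK character range; this works on Python 3.2 as well as 3.3+.
re_han = re.compile("([\u4E00-\u9FA5]+)")
print(re_han.split("abc\u4E2D\u6587def"))   # ['abc', '中文', 'def']

# Raw literal: re receives the backslash-u sequences verbatim. Python 3.3+
# interprets them inside the pattern, but on Python 3.2 the character class
# degenerates into a handful of ASCII literals -- the silent misbehavior
# these hunks fix.
# re_han_raw = re.compile(r"([\u4E00-\u9FA5]+)")   # broken on Python 3.2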