From 17ef8abba38552e5b0a78de33765095c149b8c4d Mon Sep 17 00:00:00 2001
From: Boyuan Yang <073plan@gmail.com>
Date: Sun, 21 Jan 2018 19:15:48 +0800
Subject: [PATCH] Fix typos found by codespell

---
 jieba/__init__.py | 4 ++--
 setup.py          | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/jieba/__init__.py b/jieba/__init__.py
index 62183a9..74bddb9 100644
--- a/jieba/__init__.py
+++ b/jieba/__init__.py
@@ -161,7 +161,7 @@ class Tokenizer(object):
         self.initialized = True
         default_logger.debug(
             "Loading model cost %.3f seconds." % (time.time() - t1))
-        default_logger.debug("Prefix dict has been built succesfully.")
+        default_logger.debug("Prefix dict has been built successfully.")
 
     def check_initialized(self):
         if not self.initialized:
@@ -272,7 +272,7 @@ class Tokenizer(object):
     def cut(self, sentence, cut_all=False, HMM=True):
         '''
         The main function that segments an entire sentence that contains
-        Chinese characters into seperated words.
+        Chinese characters into separated words.
 
         Parameter:
             - sentence: The str(unicode) to be segmented.
diff --git a/setup.py b/setup.py
index 265882b..6882cf3 100644
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@ GitHub: https://github.com/fxsjy/jieba
 setup(name='jieba',
       version='0.39',
-      description='Chinese Words Segementation Utilities',
+      description='Chinese Words Segmentation Utilities',
       long_description=LONGDOC,
       author='Sun, Junyi',
       author_email='ccnusjy@gmail.com',
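
For context, the second hunk in jieba/__init__.py lands in the docstring of Tokenizer.cut(), the package's main segmentation entry point, whose signature appears in the hunk itself. A minimal usage sketch of that API, assuming the module-level jieba.cut wrapper that the package exposes and an illustrative sample sentence:

    import jieba

    # cut() yields tokens lazily as a generator of strings.
    # cut_all=False selects accurate mode; HMM=True enables the
    # hidden Markov model for words missing from the prefix dict
    # (both defaults match the signature shown in the hunk above).
    words = jieba.cut("我来到北京清华大学", cut_all=False, HMM=True)
    print("/".join(words))

Against jieba 0.39 this prints something like 我/来到/北京/清华大学; the exact split depends on the dictionary shipped with the installed version.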