diff --git a/jieba/__init__.py b/jieba/__init__.py
index 62183a9..74bddb9 100644
--- a/jieba/__init__.py
+++ b/jieba/__init__.py
@@ -161,7 +161,7 @@ class Tokenizer(object):
             self.initialized = True
             default_logger.debug(
                 "Loading model cost %.3f seconds." % (time.time() - t1))
-            default_logger.debug("Prefix dict has been built succesfully.")
+            default_logger.debug("Prefix dict has been built successfully.")
 
     def check_initialized(self):
         if not self.initialized:
@@ -272,7 +272,7 @@ class Tokenizer(object):
     def cut(self, sentence, cut_all=False, HMM=True):
         '''
         The main function that segments an entire sentence that contains
-        Chinese characters into seperated words.
+        Chinese characters into separated words.
 
         Parameter:
             - sentence: The str(unicode) to be segmented.
diff --git a/setup.py b/setup.py
index 265882b..6882cf3 100644
--- a/setup.py
+++ b/setup.py
@@ -44,7 +44,7 @@ GitHub: https://github.com/fxsjy/jieba
 
 setup(name='jieba',
       version='0.39',
-      description='Chinese Words Segementation Utilities',
+      description='Chinese Words Segmentation Utilities',
       long_description=LONGDOC,
       author='Sun, Junyi',
       author_email='ccnusjy@gmail.com',
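
For context, a minimal usage sketch of the cut() method whose docstring is corrected above (illustrative only, not part of this patch; jieba.cut returns a generator of tokens):

    # sketch: default (accurate) mode vs. full mode segmentation
    import jieba

    sentence = "我来到北京清华大学"
    print("/".join(jieba.cut(sentence, cut_all=False)))  # accurate mode (default)
    print("/".join(jieba.cut(sentence, cut_all=True)))   # full mode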