Introduction: if you run into usage problems, please search for solutions yourself.
IKAnalyzer: a quasi-commercial Chinese word segmentation algorithm (analyzer) built on Lucene.
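The file list below includes org\mira\lucene\analysis\IK_CAnalyzer.java, which by its name is a Lucene Analyzer implementation. The following is a minimal usage sketch, assuming the Lucene 2.x-era TokenStream API that this old org.mira package layout targets and a no-argument IK_CAnalyzer constructor; the field name "content" and the sample sentence are illustrative only.

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;

import org.mira.lucene.analysis.IK_CAnalyzer;

public class IKAnalyzerDemo {
    public static void main(String[] args) throws Exception {
        // IK_CAnalyzer extends Lucene's Analyzer; assumption: the no-arg
        // constructor loads the dictionaries bundled under dict\.
        Analyzer analyzer = new IK_CAnalyzer();

        // Tokenize a sample Chinese sentence into a TokenStream.
        TokenStream ts = analyzer.tokenStream("content",
                new StringReader("IKAnalyzer是基于Lucene的中文分词器"));

        // Lucene 2.x-style iteration: next() returns null at end of stream.
        Token token;
        while ((token = ts.next()) != null) {
            System.out.println(token.termText());
        }
    }
}

MIK_CAnalyzer.java appears to be a second Analyzer variant in the same package; if so, it should be usable as a drop-in replacement for IK_CAnalyzer in the sketch above.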
Package: 89346494k50rvzdojs03.rar file list:
dict\connector.dic
dict\count.dic
dict\c_number.dic
dict\noisechar.dic
dict\number_sign.dic
dict\other_digit.dic
dict\local\local.dic
dict\stopword\stopword.dic
dict\suffix\suffix.dic
dict\word\wordbase.dic
org\mira\lucene\analysis\IKTokenizer.java
org\mira\lucene\analysis\IK_CAnalyzer.java
org\mira\lucene\analysis\MIK_CAnalyzer.java
org\mira\lucene\analysis\MTokenDelegate.java
org\mira\lucene\analysis\TokenDelegate.java
org\mira\lucene\analysis\dict\Dictionary.java
org\mira\lucene\analysis\dict\DictSegment.java
org\mira\lucene\analysis\dict\Hit.java
org\mira\lucene\analysis\dict\WordType.java
org\mira\lucene\analysis\dict\Dictionary.class
org\mira\lucene\analysis\dict\DictSegment.class
org\mira\lucene\analysis\dict\Hit.class
org\mira\lucene\analysis\dict\WordType.class
org\mira\lucene\analysis\IKTokenizer.class
org\mira\lucene\analysis\IK_CAnalyzer.class
org\mira\lucene\analysis\MIK_CAnalyzer.class
org\mira\lucene\analysis\MTokenDelegate.class
org\mira\lucene\analysis\TokenDelegate.class
org\mira\lucene\analysis\dict
org\mira\lucene\analysis
org\mira\lucene
dict\local
dict\stopword
dict\suffix
dict\word
org\mira
dict
org