UNPKG

novel-segment

Version:

Chinese word segmentation 簡繁中文分词模块 以網路小說為樣本 — a Simplified/Traditional Chinese word-segmentation module whose sample corpus is drawn from web novels.

35 lines (34 loc) 1.18 kB
/**
 * Wildcard-recognition tokenizer module (declaration file).
 *
 * Declares a sub-tokenizer that handles words left unrecognized by earlier
 * segmentation passes, using wildcard lookup tables.
 *
 * @author 老雷 <leizongmin@gmail.com>
 */
import { SubSModuleTokenizer, ISubTokenizerCreate } from '../mod';
import { Segment, IWord, IDICT, IDICT2 } from '../Segment';
import { IWordDebugInfo } from '../util/index';
export declare class WildcardTokenizer extends SubSModuleTokenizer {
    name: string;
    // Wildcard lookup tables. NOTE(review): key/indexing semantics are not
    // visible in this declaration file — defined in the implementation; confirm there.
    protected _TABLE: IDICT<IWord>;
    protected _TABLE2: IDICT2<IWord>;
    // Rebuilds the internal table cache (details in the implementation file).
    _cache(): void;
    /**
     * Segments words that were not recognized by previous tokenizers.
     *
     * @param words - the word list produced so far
     * @returns the word list with unrecognized entries further split
     */
    split(words: IWord[]): IWord[];
    // Builds a token for a wildcard match; `lasttype` and `attr` carry
    // optional type/debug info (exact meaning defined in the implementation).
    createWildcardToken(word: IWord, lasttype?: number, attr?: IWordDebugInfo): Segment.IWord;
    // Splits `text` against the wildcard tables starting at position `cur`.
    splitWildcard(text: string, cur?: number): IWord[];
    /**
     * Matches words in the text and returns the related information.
     *
     * @param text - text to match against
     * @param cur - start position within the text
     * @returns matches in the format `{w: 'word', c: startPosition}`
     */
    matchWord(text: string, cur?: number): Segment.IWord[];
}
// Factory used by the segmenter framework to instantiate this tokenizer.
export declare const init: ISubTokenizerCreate<WildcardTokenizer, SubSModuleTokenizer>;
// Module kind marker consumed by the plugin loader.
export declare const type = "tokenizer";
export default WildcardTokenizer;