UNPKG

@microsoft/api-extractor

Version:

Validation, documentation, and auditing for the exported API of a TypeScript package

45 lines (44 loc) 1.77 kB
import Token from './Token';
/**
 * Handles the tokenization of a JSDoc comment.
 *
 * NOTE(review): this is a declaration (.d.ts) file — the implementations of the
 * members below are not visible here; behavioral notes are taken from the
 * original doc comments or hedged where the contract cannot be confirmed.
 */
export default class Tokenizer {
    /**
     * Match JsDoc block tags and inline tags
     * Example "@a @b@c d@e @f {whatever} {@link a} { @something } \@g" => ["@a", "@f", "{@link a}", "{ @something }"]
     */
    private static _jsdocTagsRegex;
    /**
     * List of Tokens that have been tokenized.
     */
    private _tokenStream;
    // Callback used to report errors; set from the constructor's reportError
    // parameter — TODO confirm against the implementation.
    private _reportError;
    /**
     * @param docs - The raw doc comment text to tokenize.
     * @param reportError - Callback invoked with a message when a problem is found.
     */
    constructor(docs: string, reportError: (message: string) => void);
    /**
     * Converts a doc comment string into an array of Tokens. This processing is done so that docs
     * can be processed more strictly.
     * Example: "This is a JsDoc description with a {@link URL} and more text. \@summary example \@public"
     * => [
     *   {tokenType: 'text', parameter: 'This is a JsDoc description with a'},
     *   {tokenType: '@link', parameter: 'URL'},
     *   {tokenType: '\@summary', parameter: ''},
     *   {tokenType: 'text', parameter: 'example'},
     *   {tokenType: '\@public', parameter: ''}
     * ]
     */
    protected _tokenizeDocs(docs: string): Token[];
    /**
     * Parse an inline tag and returns the Token for it if it is a valid inline tag.
     * Example '{@link https://bing.com | Bing}' => '{type: 'Inline', tag: '@link', text: 'https://bing.com | Bing'}'
     */
    protected _tokenizeInline(docEntry: string): Token;
    // Presumably returns the next Token without consuming it — implementation
    // not visible here; confirm against Tokenizer.ts.
    peekToken(): Token;
    // Presumably consumes and returns the next Token from the stream — confirm
    // against Tokenizer.ts.
    getToken(): Token;
    /**
     * Trims whitespaces on either end of the entry (which is just a string within the doc comments),
     * replaces \r and \n's with single whitespace, and removes empty entries.
     *
     * @param docEntries - Array of doc strings to be sanitized
     */
    private _sanitizeDocEntries(docEntries);
}