chromadb-default-embed
Chroma's fork of @xenova/transformers serving as our default embedding function
declare const PreTrainedModel_base: new () => {
(...args: any[]): any;
_call(...args: any[]): any;
};
/**
* A base class for pre-trained models that provides the model configuration and an ONNX session.
*/
export class PreTrainedModel extends PreTrainedModel_base {
/**
* Instantiate one of the model classes of the library from a pretrained model.
*
* The model class to instantiate is selected based on the `model_type` property of the config object
* (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
*
* @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
* - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
* Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
* user or organization name, like `dbmdz/bert-base-german-cased`.
* - A path to a *directory* containing model weights, e.g., `./my_model_directory/`.
* @param {import('./utils/hub.js').PretrainedOptions} options Additional options for loading the model.
*
* @returns {Promise<PreTrainedModel>} A new instance of the `PreTrainedModel` class.
*/
static from_pretrained(pretrained_model_name_or_path: string, { quantized, progress_callback, config, cache_dir, local_files_only, revision, model_file_name, }?: import('./utils/hub.js').PretrainedOptions): Promise<PreTrainedModel>;
/**
* Creates a new instance of the `PreTrainedModel` class.
* @param {Object} config The model configuration.
* @param {any} session The ONNX session for the model.
*/
constructor(config: any, session: any);
main_input_name: string;
config: any;
session: any;
can_generate: boolean;
_runBeam: typeof decoderRunBeam;
_getStartBeams: typeof decoderStartBeams;
_updateBeam: typeof decoderUpdatebeam;
_forward: typeof encoderForward;
/**
* Disposes of all the ONNX sessions that were created during inference.
* @returns {Promise<unknown[]>} A promise that resolves once every ONNX session created for this model has been disposed.
* @todo Use https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry
*/
dispose(): Promise<unknown[]>;
/**
* Runs the model with the provided inputs
* @param {Object} model_inputs Object containing input tensors
* @returns {Promise<Object>} Object containing output tensors
*/
_call(model_inputs: any): Promise<any>;
/**
* Forward method for a pretrained model. If not overridden by a subclass, the correct forward method
* will be chosen based on the model type.
* @param {Object} model_inputs The input data to the model in the format specified in the ONNX model.
* @returns {Promise<Object>} The output data from the model in the format specified in the ONNX model.
* @throws {Error} This method must be implemented in subclasses.
*/
forward(model_inputs: any): Promise<any>;
/**
* @param {import('./utils/generation.js').GenerationConfigType} generation_config
* @param {number} input_ids_seq_length The starting sequence length for the input ids.
* @returns {LogitsProcessorList}
* @private
*/
private _get_logits_processor;
/**
* This function merges multiple generation configs together to form a final generation config to be used by the model for text generation.
* It first creates an empty `GenerationConfig` object, then it applies the model's own `generation_config` property to it. Finally, if a `generation_config` object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object.
* @param {import('./utils/generation.js').GenerationConfigType} generation_config A `GenerationConfig` object containing generation parameters.
* @returns {import('./utils/generation.js').GenerationConfigType} The final generation config object to be used by the model for text generation.
*/
_get_generation_config(generation_config: import('./utils/generation.js').GenerationConfigType): import('./utils/generation.js').GenerationConfigType;
/**
* @typedef {import('./utils/maths.js').TypedArray} TypedArray
*/
/**
* @typedef {{ sequences: Tensor, decoder_attentions: Tensor, cross_attentions: Tensor }} EncoderDecoderOutput
* @typedef {Object} DecoderOutput
*/
/**
* Generates text based on the given inputs and generation configuration using the model.
* @param {Tensor|Array|TypedArray} inputs An array of input token IDs.
* @param {Object|GenerationConfig|null} generation_config The generation configuration to use. If null, the default configuration will be used.
* @param {Object|null} logits_processor An optional logits processor to use. If null, a new LogitsProcessorList instance will be created.
* @param {Object} options Additional generation options.
* @param {Object} [options.inputs_attention_mask=null] An optional attention mask for the inputs.
* @returns {Promise<number[][]|EncoderDecoderOutput|DecoderOutput>} An array of generated output sequences, where each sequence is an array of token IDs.
* @throws {Error} Throws an error if the inputs array is empty.
*/
generate(inputs: any[] | import("./transformers.js").TypedArray | Tensor, generation_config?: any | (new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType) | null, logits_processor?: any | null, { inputs_attention_mask }?: {
inputs_attention_mask?: any;
}): Promise<any>;
/**
* Helper function to add attentions to beam
* @param {Object} beam
* @param {Object} output
* @private
*/
private addAttentionsToBeam;
/**
* Groups an array of beam objects by their ids.
*
* @param {Array} beams The array of beam objects to group.
* @returns {Array} An array of arrays, where each inner array contains beam objects with the same id.
*/
groupBeams(beams: any[]): any[];
/**
* Returns an object containing past key values from the given decoder results object.
*
* @param {Object} decoderResults The decoder results object.
* @param {Object} pastKeyValues The previous past key values.
* @returns {Object} An object containing past key values.
*/
getPastKeyValues(decoderResults: any, pastKeyValues: any): any;
/**
* Returns an object containing attentions from the given decoder results object.
*
* @param {Object} decoderResults The decoder results object.
* @returns {Object} An object containing attentions.
*/
getAttentions(decoderResults: any): any;
/**
* Adds past key values to the decoder feeds object. If pastKeyValues is null, creates new tensors for past key values.
*
* @param {Object} decoderFeeds The decoder feeds object to add past key values to.
* @param {Object} pastKeyValues An object containing past key values.
*/
addPastKeyValues(decoderFeeds: any, pastKeyValues: any): void;
/**
* Initializes and returns the beam for text generation task
* @param {Tensor} inputTokenIds The input token ids.
* @param {Object} generation_config The generation config.
* @param {number} numOutputTokens The number of tokens to be generated.
* @param {Tensor} inputs_attention_mask Optional input attention mask.
* @returns {any} A Beam object representing the initialized beam.
* @private
*/
private getStartBeams;
/**
* Runs a single step of the beam search generation algorithm.
* @param {any} beam The current beam being generated.
* @returns {Promise<any>} The updated beam after a single generation step.
* @private
*/
private runBeam;
/**
* Update a beam with a new token ID.
* @param {Object} beam The beam to update.
* @param {number} newTokenId The new token ID to add to the beam's output.
* @private
*/
private updateBeam;
}
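/**
* **Example:** A minimal usage sketch (illustrative, not part of the declarations).
* It assumes the `Xenova/all-MiniLM-L6-v2` checkpoint is available on the Hugging
* Face Hub and that `input_ids`/`attention_mask` come from a matching tokenizer
* (not shown). Instances are callable because `_call` is trapped on the base class.
*
* ```typescript
* const model = await PreTrainedModel.from_pretrained('Xenova/all-MiniLM-L6-v2', {
*   quantized: true, // load the quantized ONNX weights
* });
* const output = await model({ input_ids, attention_mask }); // forwards through the ONNX session
* await model.dispose(); // release every ONNX session created for this model
* ```
*/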
export class ModelOutput {
}
/**
* Base class for model's outputs, with potential hidden states and attentions.
*/
export class BaseModelOutput extends ModelOutput {
/**
* @param {Object} output The output of the model.
* @param {Tensor} output.last_hidden_state Sequence of hidden-states at the output of the last layer of the model.
* @param {Tensor} [output.hidden_states] Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
* @param {Tensor} [output.attentions] Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
*/
constructor({ last_hidden_state, hidden_states, attentions }: {
last_hidden_state: Tensor;
hidden_states?: Tensor;
attentions?: Tensor;
});
last_hidden_state: Tensor;
hidden_states: Tensor;
attentions: Tensor;
}
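/**
* **Example:** A sketch of mean-pooling `last_hidden_state` into a sentence
* embedding, the typical use of this package. It assumes batch size 1 and that
* `Tensor` exposes `data` (flat values) and `dims` (`[batch, seq, hidden]`), as
* in upstream @xenova/transformers; a production pooler should also mask
* padding tokens using the attention mask.
*
* ```typescript
* function meanPool(output: BaseModelOutput): number[] {
*   const [, seqLen, hidden] = output.last_hidden_state.dims;
*   const data = output.last_hidden_state.data as Float32Array;
*   const embedding = new Array(hidden).fill(0);
*   for (let t = 0; t < seqLen; ++t) {
*     for (let h = 0; h < hidden; ++h) {
*       embedding[h] += data[t * hidden + h] / seqLen; // running mean over tokens
*     }
*   }
*   return embedding;
* }
* ```
*/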
export class BertPreTrainedModel extends PreTrainedModel {
}
export class BertModel extends BertPreTrainedModel {
}
/**
* BertForMaskedLM is a class representing a BERT model for masked language modeling.
*/
export class BertForMaskedLM extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* BertForSequenceClassification is a class representing a BERT model for sequence classification.
*/
export class BertForSequenceClassification extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* BertForTokenClassification is a class representing a BERT model for token classification.
*/
export class BertForTokenClassification extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* BertForQuestionAnswering is a class representing a BERT model for question answering.
*/
export class BertForQuestionAnswering extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
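/**
* **Example:** A sketch of decoding an extractive-QA answer span from
* `BertForQuestionAnswering`. It assumes `QuestionAnsweringModelOutput` carries
* `start_logits` and `end_logits` tensors (as upstream) and batch size 1; real
* decoding should also enforce `start <= end` and skip special tokens.
*
* ```typescript
* function bestSpan(out: QuestionAnsweringModelOutput): [number, number] {
*   const argmax = (t: Tensor): number => {
*     const data = t.data as Float32Array;
*     let best = 0;
*     for (let i = 1; i < data.length; ++i) if (data[i] > data[best]) best = i;
*     return best;
*   };
*   return [argmax(out.start_logits), argmax(out.end_logits)]; // token indices
* }
* ```
*/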
export class RoFormerPreTrainedModel extends PreTrainedModel {
}
/**
* The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.
*/
export class RoFormerModel extends RoFormerPreTrainedModel {
}
/**
* RoFormer Model with a `language modeling` head on top.
*/
export class RoFormerForMaskedLM extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class RoFormerForSequenceClassification extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output)
* e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class RoFormerForTokenClassification extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD
* (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class RoFormerForQuestionAnswering extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class ConvBertPreTrainedModel extends PreTrainedModel {
}
/**
* The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.
*/
export class ConvBertModel extends ConvBertPreTrainedModel {
}
/**
* ConvBERT Model with a language modeling head on top.
*/
export class ConvBertForMaskedLM extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class ConvBertForSequenceClassification extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output)
* e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class ConvBertForTokenClassification extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD
* (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class ConvBertForQuestionAnswering extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class ElectraPreTrainedModel extends PreTrainedModel {
}
/**
* The bare Electra Model transformer outputting raw hidden-states without any specific head on top.
* Identical to the BERT model except that it uses an additional linear layer between the embedding
* layer and the encoder if the hidden size and embedding size are different.
*/
export class ElectraModel extends ElectraPreTrainedModel {
}
/**
* Electra model with a language modeling head on top.
*/
export class ElectraForMaskedLM extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class ElectraForSequenceClassification extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* Electra model with a token classification head on top.
*/
export class ElectraForTokenClassification extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD
* (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class ElectraForQuestionAnswering extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class CamembertPreTrainedModel extends PreTrainedModel {
}
/**
* The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.
*/
export class CamembertModel extends CamembertPreTrainedModel {
}
/**
* CamemBERT Model with a `language modeling` head on top.
*/
export class CamembertForMaskedLM extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
*/
export class CamembertForSequenceClassification extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class CamembertForTokenClassification extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* CamemBERT Model with a span classification head on top for extractive question-answering tasks
*/
export class CamembertForQuestionAnswering extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class DebertaPreTrainedModel extends PreTrainedModel {
}
/**
* The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.
*/
export class DebertaModel extends DebertaPreTrainedModel {
}
/**
* DeBERTa Model with a `language modeling` head on top.
*/
export class DebertaForMaskedLM extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class DebertaForSequenceClassification extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class DebertaForTokenClassification extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD
* (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class DebertaForQuestionAnswering extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class DebertaV2PreTrainedModel extends PreTrainedModel {
}
/**
* The bare DeBERTa-V2 Model transformer outputting raw hidden-states without any specific head on top.
*/
export class DebertaV2Model extends DebertaV2PreTrainedModel {
}
/**
* DeBERTa-V2 Model with a `language modeling` head on top.
*/
export class DebertaV2ForMaskedLM extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* DeBERTa-V2 Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class DebertaV2ForSequenceClassification extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* DeBERTa-V2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class DebertaV2ForTokenClassification extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* DeBERTa-V2 Model with a span classification head on top for extractive question-answering tasks like SQuAD
* (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class DebertaV2ForQuestionAnswering extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class DistilBertPreTrainedModel extends PreTrainedModel {
}
export class DistilBertModel extends DistilBertPreTrainedModel {
}
/**
* DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification.
*/
export class DistilBertForSequenceClassification extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* DistilBertForTokenClassification is a class representing a DistilBERT model for token classification.
*/
export class DistilBertForTokenClassification extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering.
*/
export class DistilBertForQuestionAnswering extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
/**
* DistilBertForMaskedLM is a class representing a DistilBERT model for masked language modeling.
*/
export class DistilBertForMaskedLM extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
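/**
* **Example:** A sketch of filling a single `[MASK]` with
* `DistilBertForMaskedLM`. The model id, the `maskIndex` variable, and the
* shape of `MaskedLMOutput.logits` (`[batch, seq, vocab]`, as upstream) are
* assumptions for illustration; `input_ids`/`attention_mask` come from a
* matching tokenizer (not shown).
*
* ```typescript
* const mlm = await DistilBertForMaskedLM.from_pretrained('Xenova/distilbert-base-uncased');
* const { logits } = await mlm({ input_ids, attention_mask });
* const [, , vocabSize] = logits.dims;
* // Scores for the masked position only (hypothetical `maskIndex`).
* const row = (logits.data as Float32Array).subarray(maskIndex * vocabSize, (maskIndex + 1) * vocabSize);
* let bestTokenId = 0;
* for (let i = 1; i < row.length; ++i) if (row[i] > row[bestTokenId]) bestTokenId = i;
* ```
*/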
export class EsmPreTrainedModel extends PreTrainedModel {
}
/**
* The bare ESM Model transformer outputting raw hidden-states without any specific head on top.
*/
export class EsmModel extends EsmPreTrainedModel {
}
/**
* ESM Model with a `language modeling` head on top.
*/
export class EsmForMaskedLM extends EsmPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class EsmForSequenceClassification extends EsmPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* ESM Model with a token classification head on top (a linear layer on top of the hidden-states output)
* e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class EsmForTokenClassification extends EsmPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
export class MobileBertPreTrainedModel extends PreTrainedModel {
}
export class MobileBertModel extends MobileBertPreTrainedModel {
}
/**
* MobileBertForMaskedLM is a class representing a MobileBERT model for masked language modeling.
*/
export class MobileBertForMaskedLM extends MobileBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class MobileBertForSequenceClassification extends MobileBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* MobileBert Model with a span classification head on top for extractive question-answering tasks
*/
export class MobileBertForQuestionAnswering extends MobileBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class MPNetPreTrainedModel extends PreTrainedModel {
}
/**
* The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.
*/
export class MPNetModel extends MPNetPreTrainedModel {
}
/**
* MPNetForMaskedLM is a class representing an MPNet model for masked language modeling.
*/
export class MPNetForMaskedLM extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* MPNetForSequenceClassification is a class representing an MPNet model for sequence classification.
*/
export class MPNetForSequenceClassification extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* MPNetForTokenClassification is a class representing an MPNet model for token classification.
*/
export class MPNetForTokenClassification extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* MPNetForQuestionAnswering is a class representing an MPNet model for question answering.
*/
export class MPNetForQuestionAnswering extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class SqueezeBertPreTrainedModel extends PreTrainedModel {
}
export class SqueezeBertModel extends SqueezeBertPreTrainedModel {
}
export class SqueezeBertForMaskedLM extends SqueezeBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
export class SqueezeBertForSequenceClassification extends SqueezeBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
export class SqueezeBertForQuestionAnswering extends SqueezeBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class AlbertPreTrainedModel extends PreTrainedModel {
}
export class AlbertModel extends AlbertPreTrainedModel {
}
export class AlbertForSequenceClassification extends AlbertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
export class AlbertForQuestionAnswering extends AlbertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class AlbertForMaskedLM extends AlbertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
export class T5PreTrainedModel extends PreTrainedModel {
}
export class T5Model extends T5PreTrainedModel {
}
/**
* T5ForConditionalGeneration is a class representing a T5 model for conditional generation.
*/
export class T5ForConditionalGeneration extends T5PreTrainedModel {
/**
* Creates a new instance of the `T5ForConditionalGeneration` class.
* @param {Object} config The model configuration.
* @param {any} session The ONNX session containing the encoder weights.
* @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.
* @param {GenerationConfig} generation_config The generation configuration.
*/
constructor(config: any, session: any, decoder_merged_session: any, generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType);
decoder_merged_session: any;
generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: any;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: any;
}
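/**
* **Example:** A sketch of sequence-to-sequence generation with
* `T5ForConditionalGeneration`. The model id and the `max_new_tokens` option
* mirror upstream @xenova/transformers and are assumptions here; `input_ids`
* is a tokenized prompt (not shown).
*
* ```typescript
* const t5 = await T5ForConditionalGeneration.from_pretrained('Xenova/t5-small');
* // The second argument is merged with the model's own generation_config
* // (see _get_generation_config above).
* const sequences = await t5.generate(input_ids, { max_new_tokens: 50 });
* // `sequences` holds generated token ids; decode them with the tokenizer.
* ```
*/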
/**
* An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
*/
export class LongT5PreTrainedModel extends PreTrainedModel {
}
/**
* The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.
*/
export class LongT5Model extends LongT5PreTrainedModel {
}
/**
* LONGT5 Model with a `language modeling` head on top.
*/
export class LongT5ForConditionalGeneration extends LongT5PreTrainedModel {
/**
* Creates a new instance of the `LongT5ForConditionalGeneration` class.
* @param {Object} config The model configuration.
* @param {any} session The ONNX session containing the encoder weights.
* @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.
* @param {GenerationConfig} generation_config The generation configuration.
*/
constructor(config: any, session: any, decoder_merged_session: any, generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType);
decoder_merged_session: any;
generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: any;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: any;
}
export class MT5PreTrainedModel extends PreTrainedModel {
}
export class MT5Model extends MT5PreTrainedModel {
}
/**
* A class representing a conditional sequence-to-sequence model based on the MT5 architecture.
*/
export class MT5ForConditionalGeneration extends MT5PreTrainedModel {
/**
* Creates a new instance of the `MT5ForConditionalGeneration` class.
* @param {any} config The model configuration.
* @param {any} session The ONNX session containing the encoder weights.
* @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.
* @param {GenerationConfig} generation_config The generation configuration.
*/
constructor(config: any, session: any, decoder_merged_session: any, generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType);
decoder_merged_session: any;
generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: any;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: any;
}
export class BartPretrainedModel extends PreTrainedModel {
}
/**
* The bare BART Model outputting raw hidden-states without any specific head on top.
*/
export class BartModel extends BartPretrainedModel {
}
/**
* The BART Model with a language modeling head. Can be used for summarization.
*/
export class BartForConditionalGeneration extends BartPretrainedModel {
/**
* Creates a new instance of the `BartForConditionalGeneration` class.
* @param {Object} config The configuration object for the Bart model.
* @param {Object} session The ONNX session used to execute the model.
* @param {Object} decoder_merged_session The ONNX session used to execute the decoder.
* @param {Object} generation_config The generation configuration object.
*/
constructor(config: any, session: any, decoder_merged_session: any, generation_config: any);
decoder_merged_session: any;
generation_config: any;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: number;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: number;
}
/**
* Bart model with a sequence classification head on top (a linear layer on top of the pooled output)
*/
export class BartForSequenceClassification extends BartPretrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
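/**
* **Example:** A sketch of turning `SequenceClassifierOutput.logits` into
* class probabilities with a numerically stable softmax. Batch size 1 and a
* `logits` Tensor with flat `data` (as upstream) are assumptions here.
*
* ```typescript
* function softmax(logits: Float32Array): number[] {
*   const max = Math.max(...logits);              // subtract the max for stability
*   const exps = Array.from(logits, (v) => Math.exp(v - max));
*   const sum = exps.reduce((a, b) => a + b, 0);
*   return exps.map((e) => e / sum);
* }
* const probs = softmax(output.logits.data as Float32Array);
* ```
*/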
export class MBartPreTrainedModel extends PreTrainedModel {
}
/**
* The bare MBART Model outputting raw hidden-states without any specific head on top.
*/
export class MBartModel extends MBartPreTrainedModel {
}
/**
* The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.
*/
export class MBartForConditionalGeneration extends MBartPreTrainedModel {
/**
* Creates a new instance of the `MBartForConditionalGeneration` class.
* @param {Object} config The configuration object for the MBart model.
* @param {Object} session The ONNX session used to execute the model.
* @param {Object} decoder_merged_session The ONNX session used to execute the decoder.
* @param {Object} generation_config The generation configuration object.
*/
constructor(config: any, session: any, decoder_merged_session: any, generation_config: any);
decoder_merged_session: any;
generation_config: any;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: number;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: number;
}
/**
* MBart model with a sequence classification head on top (a linear layer on top of the pooled output).
*/
export class MBartForSequenceClassification extends MBartPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
export class MBartForCausalLM extends MBartPreTrainedModel {
/**
* Creates a new instance of the `MBartForCausalLM` class.
* @param {Object} config Configuration object for the model.
* @param {Object} decoder_merged_session ONNX Session object for the decoder.
* @param {Object} generation_config Configuration object for the generation process.
*/
constructor(config: any, decoder_merged_session: any, generation_config: any);
generation_config: any;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: number;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: number;
}
export class BlenderbotPreTrainedModel extends PreTrainedModel {
}
/**
* The bare Blenderbot Model outputting raw hidden-states without any specific head on top.
*/
export class BlenderbotModel extends BlenderbotPreTrainedModel {
}
/**
* The Blenderbot Model with a language modeling head. Can be used for summarization.
*/
export class BlenderbotForConditionalGeneration extends BlenderbotPreTrainedModel {
/**
* Creates a new instance of the `BlenderbotForConditionalGeneration` class.
* @param {any} config The model configuration.
* @param {any} session The ONNX session containing the encoder weights.
* @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.
* @param {GenerationConfig} generation_config The generation configuration.
*/
constructor(config: any, session: any, decoder_merged_session: any, generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType);
decoder_merged_session: any;
generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: number;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: number;
}
export class BlenderbotSmallPreTrainedModel extends PreTrainedModel {
}
/**
* The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.
*/
export class BlenderbotSmallModel extends BlenderbotSmallPreTrainedModel {
}
/**
* The BlenderbotSmall Model with a language modeling head. Can be used for summarization.
*/
export class BlenderbotSmallForConditionalGeneration extends BlenderbotSmallPreTrainedModel {
/**
* Creates a new instance of the `BlenderbotSmallForConditionalGeneration` class.
* @param {any} config The model configuration.
* @param {any} session The ONNX session containing the encoder weights.
* @param {any} decoder_merged_session The ONNX session containing the merged decoder weights.
* @param {GenerationConfig} generation_config The generation configuration.
*/
constructor(config: any, session: any, decoder_merged_session: any, generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType);
decoder_merged_session: any;
generation_config: new (kwargs?: import("./utils/generation.js").GenerationConfigType) => import("./utils/generation.js").GenerationConfigType;
num_decoder_layers: any;
num_decoder_heads: any;
decoder_dim_kv: number;
num_encoder_layers: any;
num_encoder_heads: any;
encoder_dim_kv: number;
}
export class RobertaPreTrainedModel extends PreTrainedModel {
}
export class RobertaModel extends RobertaPreTrainedModel {
}
/**
* RobertaForMaskedLM class for performing masked language modeling on Roberta models.
*/
export class RobertaForMaskedLM extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* RobertaForSequenceClassification class for performing sequence classification on Roberta models.
*/
export class RobertaForSequenceClassification extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* RobertaForTokenClassification class for performing token classification on Roberta models.
*/
export class RobertaForTokenClassification extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* RobertaForQuestionAnswering class for performing question answering on Roberta models.
*/
export class RobertaForQuestionAnswering extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
/**
* An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
*/
export class XLMPreTrainedModel extends PreTrainedModel {
}
/**
* The bare XLM Model transformer outputting raw hidden-states without any specific head on top.
*/
export class XLMModel extends XLMPreTrainedModel {
}
/**
* The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
*/
export class XLMWithLMHeadModel extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class XLMForSequenceClassification extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* XLM Model with a token classification head on top (a linear layer on top of the hidden-states output)
*/
export class XLMForTokenClassification extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* XLM Model with a span classification head on top for extractive question-answering tasks
*/
export class XLMForQuestionAnswering extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class XLMRobertaPreTrainedModel extends PreTrainedModel {
}
export class XLMRobertaModel extends XLMRobertaPreTrainedModel {
}
/**
* XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models.
*/
export class XLMRobertaForMaskedLM extends XLMRobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_