/**
 * transformers-fork
 * State-of-the-art Machine Learning for the web. Run 🤗 Transformers directly in your browser, with no need for a server!
 */
declare const PreTrainedModel_base: new () => {
(...args: any[]): any;
_call(...args: any[]): any;
};
/**
* A base class for pre-trained models that provides the model configuration and an ONNX session.
*/
export class PreTrainedModel extends PreTrainedModel_base {
/**
* Instantiate one of the model classes of the library from a pretrained model.
*
* The model class to instantiate is selected based on the `model_type` property of the config object
* (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible)
*
* @param {string} pretrained_model_name_or_path The name or path of the pretrained model. Can be either:
* - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
* Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
* user or organization name, like `dbmdz/bert-base-german-cased`.
* - A path to a *directory* containing model weights, e.g., `./my_model_directory/`.
* @param {import('./utils/hub.js').PretrainedModelOptions} options Additional options for loading the model.
*
* @returns {Promise<PreTrainedModel>} A new instance of the `PreTrainedModel` class.
*/
static from_pretrained(pretrained_model_name_or_path: string, { progress_callback, config, cache_dir, local_files_only, revision, model_file_name, subfolder, device, dtype, use_external_data_format, session_options, }?: import("./utils/hub.js").PretrainedModelOptions): Promise<PreTrainedModel>;
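    /*
     * Usage sketch (illustrative, not part of the declarations): loading a model by
     * Hub id with a few of the options listed above. Assumes this fork keeps the
     * upstream Transformers.js exports; the model id and option values are examples.
     *
     *   import { BertModel } from 'transformers-fork';
     *
     *   const model = await BertModel.from_pretrained('Xenova/bert-base-uncased', {
     *       dtype: 'q8',                              // quantized weights, where available
     *       device: 'wasm',                           // or 'webgpu' in supporting browsers
     *       progress_callback: (p) => console.log(p), // download/loading progress events
     *   });
     *   // ...run inference...
     *   await model.dispose();                        // free the underlying ONNX sessions
     */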
/**
* Creates a new instance of the `PreTrainedModel` class.
* @param {import('./configs.js').PretrainedConfig} config The model configuration.
* @param {Record<string, any>} sessions The inference sessions for the model.
* @param {Record<string, Object>} configs Additional configuration files (e.g., generation_config.json).
*/
constructor(config: import("./configs.js").PretrainedConfig, sessions: Record<string, any>, configs: Record<string, any>);
main_input_name: string;
forward_params: string[];
config: import("./configs.js").PretrainedConfig;
sessions: Record<string, any>;
configs: Record<string, any>;
can_generate: boolean;
_forward: typeof decoderForward;
_prepare_inputs_for_generation: typeof image_text_to_text_prepare_inputs_for_generation;
/** @type {import('./configs.js').TransformersJSConfig} */
custom_config: import("./configs.js").TransformersJSConfig;
/**
* Disposes of all the ONNX sessions that were created during inference.
     * @returns {Promise<unknown[]>} A promise that resolves once every ONNX session has been disposed.
* @todo Use https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/FinalizationRegistry
*/
dispose(): Promise<unknown[]>;
/**
* Runs the model with the provided inputs
* @param {Object} model_inputs Object containing input tensors
* @returns {Promise<Object>} Object containing output tensors
*/
_call(model_inputs: any): Promise<any>;
/**
* Forward method for a pretrained model. If not overridden by a subclass, the correct forward method
* will be chosen based on the model type.
* @param {Object} model_inputs The input data to the model in the format specified in the ONNX model.
* @returns {Promise<Object>} The output data from the model in the format specified in the ONNX model.
* @throws {Error} This method must be implemented in subclasses.
*/
forward(model_inputs: any): Promise<any>;
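    /*
     * Calling sketch: instances are callable (see `_call` above) with an object of
     * named input tensors matching the ONNX graph. Assumes the package also exports
     * the `Tensor` class, as upstream Transformers.js does; values are illustrative.
     *
     *   import { BertModel, Tensor } from 'transformers-fork';
     *
     *   const model = await BertModel.from_pretrained('Xenova/bert-base-uncased');
     *   const input_ids = new Tensor('int64', new BigInt64Array([101n, 7592n, 102n]), [1, 3]);
     *   const attention_mask = new Tensor('int64', new BigInt64Array([1n, 1n, 1n]), [1, 3]);
     *   // depending on the exported graph, `token_type_ids` may also be required
     *   const { last_hidden_state } = await model({ input_ids, attention_mask });
     */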
/**
* Get the model's generation config, if it exists.
* @returns {GenerationConfig|null} The model's generation config if it exists, otherwise `null`.
*/
get generation_config(): GenerationConfig | null;
/**
* This function returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`]
* instances used for multinomial sampling.
* @param {GenerationConfig} generation_config The generation config.
     * @returns {LogitsProcessorList} A `LogitsProcessorList` containing the logits warpers to apply.
*/
_get_logits_warper(generation_config: GenerationConfig): LogitsProcessorList;
/**
* @param {GenerationConfig} generation_config
* @param {number} input_ids_seq_length The starting sequence length for the input ids.
* @returns {LogitsProcessorList}
* @private
*/
private _get_logits_processor;
/**
* This function merges multiple generation configs together to form a final generation config to be used by the model for text generation.
* It first creates an empty `GenerationConfig` object, then it applies the model's own `generation_config` property to it. Finally, if a `generation_config` object was passed in the arguments, it overwrites the corresponding properties in the final config with those of the passed config object.
* @param {GenerationConfig|null} generation_config A `GenerationConfig` object containing generation parameters.
* @param {Object} kwargs Additional generation parameters to be used in place of those in the `generation_config` object.
* @returns {GenerationConfig} The final generation config object to be used by the model for text generation.
*/
_prepare_generation_config(generation_config: GenerationConfig | null, kwargs: any, cls?: typeof GenerationConfig): GenerationConfig;
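    /*
     * Merge-order sketch: later sources take precedence. Roughly,
     *
     *   final = { ...new GenerationConfig(), ...model.generation_config, ...generation_config, ...kwargs };
     */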
    /**
     * Builds the list of stopping criteria to apply during generation.
* @param {GenerationConfig} generation_config
* @param {StoppingCriteriaList} [stopping_criteria=null]
*/
_get_stopping_criteria(generation_config: GenerationConfig, stopping_criteria?: StoppingCriteriaList): StoppingCriteriaList;
/**
* Confirms that the model class is compatible with generation.
* If not, raises an exception that points to the right class to use.
*/
_validate_model_class(): void;
prepare_inputs_for_generation(...args: any[]): any;
    /**
     * Updates the model inputs for the next generation iteration.
* @param {Object} inputs
* @param {bigint[][]} inputs.generated_input_ids
* @param {Object} inputs.outputs
* @param {Object} inputs.model_inputs
* @param {boolean} inputs.is_encoder_decoder
* @returns {Object} The updated model inputs for the next generation iteration.
*/
_update_model_kwargs_for_generation({ generated_input_ids, outputs, model_inputs, is_encoder_decoder }: {
generated_input_ids: bigint[][];
outputs: any;
model_inputs: any;
is_encoder_decoder: boolean;
}): any;
/**
* This function extracts the model-specific `inputs` for generation.
* @param {Object} params
* @param {Tensor} [params.inputs=null]
* @param {number} [params.bos_token_id=null]
* @param {Record<string, Tensor|number[]>} [params.model_kwargs]
* @returns {{inputs_tensor: Tensor, model_inputs: Record<string, Tensor>, model_input_name: string}} The model-specific inputs for generation.
*/
_prepare_model_inputs({ inputs, bos_token_id, model_kwargs }: {
inputs?: Tensor;
bos_token_id?: number;
model_kwargs?: Record<string, Tensor | number[]>;
}): {
inputs_tensor: Tensor;
model_inputs: Record<string, Tensor>;
model_input_name: string;
};
_prepare_encoder_decoder_kwargs_for_generation({ inputs_tensor, model_inputs, model_input_name, generation_config }: {
inputs_tensor: any;
model_inputs: any;
model_input_name: any;
generation_config: any;
}): Promise<any>;
/**
     * Prepares `decoder_input_ids` for generation with encoder-decoder models.
     * @param {Object} param0 Options including `batch_size`, `model_input_name`, `model_kwargs`, `decoder_start_token_id`, `bos_token_id`, and `generation_config`.
*/
_prepare_decoder_input_ids_for_generation({ batch_size, model_input_name, model_kwargs, decoder_start_token_id, bos_token_id, generation_config }: any): {
input_ids: any;
model_inputs: any;
};
/**
* Generates sequences of token ids for models with a language modeling head.
* @param {import('./generation/parameters.js').GenerationFunctionParameters} options
* @returns {Promise<ModelOutput|Tensor>} The output of the model, which can contain the generated token ids, attentions, and scores.
*/
generate({ inputs, generation_config, logits_processor, stopping_criteria, streamer, ...kwargs }: any): Promise<ModelOutput | Tensor>;
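    /*
     * Generation sketch: extra keyword arguments are merged into the generation
     * config (see `_prepare_generation_config`). The tokenizer API is assumed from
     * upstream Transformers.js (`AutoTokenizer` is not declared in this file).
     *
     *   import { AutoTokenizer, T5ForConditionalGeneration } from 'transformers-fork';
     *
     *   const tokenizer = await AutoTokenizer.from_pretrained('Xenova/t5-small');
     *   const model = await T5ForConditionalGeneration.from_pretrained('Xenova/t5-small');
     *   const { input_ids } = await tokenizer('translate English to German: Hello, world!');
     *   const outputs = await model.generate({ inputs: input_ids, max_new_tokens: 40 });
     *   console.log(tokenizer.batch_decode(outputs, { skip_special_tokens: true }));
     */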
/**
* Returns an object containing past key values from the given decoder results object.
*
* @param {Object} decoderResults The decoder results object.
* @param {Object} pastKeyValues The previous past key values.
     * @param {boolean} [disposeEncoderPKVs] Whether to dispose of the encoder's past key values.
     * @returns {Object} An object containing past key values.
*/
getPastKeyValues(decoderResults: any, pastKeyValues: any, disposeEncoderPKVs?: boolean): any;
/**
* Returns an object containing attentions from the given model output object.
*
* @param {Object} model_output The output of the model.
* @returns {{cross_attentions?: Tensor[]}} An object containing attentions.
*/
getAttentions(model_output: any): {
cross_attentions?: Tensor[];
};
/**
* Adds past key values to the decoder feeds object. If pastKeyValues is null, creates new tensors for past key values.
*
* @param {Object} decoderFeeds The decoder feeds object to add past key values to.
* @param {Object} pastKeyValues An object containing past key values.
*/
addPastKeyValues(decoderFeeds: any, pastKeyValues: any): void;
    /** Encodes images into features using the model's vision encoder. */
    encode_image({ pixel_values }: {
        pixel_values: any;
    }): Promise<any>;
    /** Embeds input token ids using the model's token embedding layer. */
    encode_text({ input_ids }: {
        input_ids: any;
    }): Promise<any>;
}
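/*
 * Cache-threading sketch (internal helpers above, shown for orientation only):
 * `generate` threads `past_key_values` between steps so each forward pass feeds the
 * model only the newly produced token. Schematically, with `toTensor` and
 * `argmaxLast` as hypothetical helpers:
 *
 *   let past = null;
 *   const ids = [...promptIds];
 *   for (let step = 0; step < maxNewTokens; ++step) {
 *       const feeds = { input_ids: toTensor(past ? ids.slice(-1) : ids) };
 *       model.addPastKeyValues(feeds, past);        // inject (or initialise) the cache
 *       const out = await model.forward(feeds);
 *       past = model.getPastKeyValues(out, past);   // collect the updated cache
 *       ids.push(argmaxLast(out.logits));           // greedy next-token choice
 *   }
 */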
export class ModelOutput {
}
/**
* Base class for model's outputs, with potential hidden states and attentions.
*/
export class BaseModelOutput extends ModelOutput {
/**
* @param {Object} output The output of the model.
* @param {Tensor} output.last_hidden_state Sequence of hidden-states at the output of the last layer of the model.
* @param {Tensor} [output.hidden_states] Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
     * @param {Tensor} [output.attentions] Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.
*/
constructor({ last_hidden_state, hidden_states, attentions }: {
last_hidden_state: Tensor;
hidden_states?: Tensor;
attentions?: Tensor;
});
last_hidden_state: Tensor;
hidden_states: Tensor;
attentions: Tensor;
}
export class BertPreTrainedModel extends PreTrainedModel {
}
export class BertModel extends BertPreTrainedModel {
}
/**
* BertForMaskedLM is a class representing a BERT model for masked language modeling.
*/
export class BertForMaskedLM extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
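/*
 * Masked-LM sketch: feed a sequence containing the mask token and read the logits at
 * the masked position. `AutoTokenizer` is assumed from upstream Transformers.js. The
 * classification and QA heads below follow the same calling pattern, differing only
 * in the shape and meaning of the returned logits.
 *
 *   import { AutoTokenizer, BertForMaskedLM } from 'transformers-fork';
 *
 *   const tokenizer = await AutoTokenizer.from_pretrained('Xenova/bert-base-uncased');
 *   const model = await BertForMaskedLM.from_pretrained('Xenova/bert-base-uncased');
 *   const inputs = await tokenizer('The capital of France is [MASK].');
 *   const { logits } = await model(inputs);  // MaskedLMOutput: [batch, seq_len, vocab]
 */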
/**
* BertForSequenceClassification is a class representing a BERT model for sequence classification.
*/
export class BertForSequenceClassification extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* BertForTokenClassification is a class representing a BERT model for token classification.
*/
export class BertForTokenClassification extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* BertForQuestionAnswering is a class representing a BERT model for question answering.
*/
export class BertForQuestionAnswering extends BertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class NomicBertPreTrainedModel extends PreTrainedModel {
}
export class NomicBertModel extends NomicBertPreTrainedModel {
}
export class RoFormerPreTrainedModel extends PreTrainedModel {
}
/**
* The bare RoFormer Model transformer outputting raw hidden-states without any specific head on top.
*/
export class RoFormerModel extends RoFormerPreTrainedModel {
}
/**
* RoFormer Model with a `language modeling` head on top.
*/
export class RoFormerForMaskedLM extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* RoFormer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class RoFormerForSequenceClassification extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output)
* e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class RoFormerForTokenClassification extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD
 * (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class RoFormerForQuestionAnswering extends RoFormerPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class ConvBertPreTrainedModel extends PreTrainedModel {
}
/**
* The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.
*/
export class ConvBertModel extends ConvBertPreTrainedModel {
}
/**
* ConvBERT Model with a language modeling head on top.
*/
export class ConvBertForMaskedLM extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class ConvBertForSequenceClassification extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output)
* e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class ConvBertForTokenClassification extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD
 * (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`)
*/
export class ConvBertForQuestionAnswering extends ConvBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class ElectraPreTrainedModel extends PreTrainedModel {
}
/**
* The bare Electra Model transformer outputting raw hidden-states without any specific head on top.
* Identical to the BERT model except that it uses an additional linear layer between the embedding
* layer and the encoder if the hidden size and embedding size are different.
*/
export class ElectraModel extends ElectraPreTrainedModel {
}
/**
* Electra model with a language modeling head on top.
*/
export class ElectraForMaskedLM extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class ElectraForSequenceClassification extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* Electra model with a token classification head on top.
*/
export class ElectraForTokenClassification extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
 * ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD
 * (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class ElectraForQuestionAnswering extends ElectraPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class CamembertPreTrainedModel extends PreTrainedModel {
}
/**
* The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.
*/
export class CamembertModel extends CamembertPreTrainedModel {
}
/**
* CamemBERT Model with a `language modeling` head on top.
*/
export class CamembertForMaskedLM extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* CamemBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks.
*/
export class CamembertForSequenceClassification extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* CamemBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class CamembertForTokenClassification extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* CamemBERT Model with a span classification head on top for extractive question-answering tasks
*/
export class CamembertForQuestionAnswering extends CamembertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class DebertaPreTrainedModel extends PreTrainedModel {
}
/**
* The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.
*/
export class DebertaModel extends DebertaPreTrainedModel {
}
/**
* DeBERTa Model with a `language modeling` head on top.
*/
export class DebertaForMaskedLM extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class DebertaForSequenceClassification extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class DebertaForTokenClassification extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
 * DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
 * layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class DebertaForQuestionAnswering extends DebertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class DebertaV2PreTrainedModel extends PreTrainedModel {
}
/**
* The bare DeBERTa-V2 Model transformer outputting raw hidden-states without any specific head on top.
*/
export class DebertaV2Model extends DebertaV2PreTrainedModel {
}
/**
* DeBERTa-V2 Model with a `language modeling` head on top.
*/
export class DebertaV2ForMaskedLM extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* DeBERTa-V2 Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class DebertaV2ForSequenceClassification extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* DeBERTa-V2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class DebertaV2ForTokenClassification extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
 * DeBERTa-V2 Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
 * layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
*/
export class DebertaV2ForQuestionAnswering extends DebertaV2PreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class DistilBertPreTrainedModel extends PreTrainedModel {
}
export class DistilBertModel extends DistilBertPreTrainedModel {
}
/**
* DistilBertForSequenceClassification is a class representing a DistilBERT model for sequence classification.
*/
export class DistilBertForSequenceClassification extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* DistilBertForTokenClassification is a class representing a DistilBERT model for token classification.
*/
export class DistilBertForTokenClassification extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* DistilBertForQuestionAnswering is a class representing a DistilBERT model for question answering.
*/
export class DistilBertForQuestionAnswering extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
/**
 * DistilBertForMaskedLM is a class representing a DistilBERT model for masked language modeling.
*/
export class DistilBertForMaskedLM extends DistilBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
export class EsmPreTrainedModel extends PreTrainedModel {
}
/**
* The bare ESM Model transformer outputting raw hidden-states without any specific head on top.
*/
export class EsmModel extends EsmPreTrainedModel {
}
/**
* ESM Model with a `language modeling` head on top.
*/
export class EsmForMaskedLM extends EsmPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class EsmForSequenceClassification extends EsmPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* ESM Model with a token classification head on top (a linear layer on top of the hidden-states output)
* e.g. for Named-Entity-Recognition (NER) tasks.
*/
export class EsmForTokenClassification extends EsmPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
export class MobileBertPreTrainedModel extends PreTrainedModel {
}
export class MobileBertModel extends MobileBertPreTrainedModel {
}
/**
 * MobileBertForMaskedLM is a class representing a MobileBERT model for masked language modeling.
*/
export class MobileBertForMaskedLM extends MobileBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class MobileBertForSequenceClassification extends MobileBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* MobileBert Model with a span classification head on top for extractive question-answering tasks
*/
export class MobileBertForQuestionAnswering extends MobileBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class MPNetPreTrainedModel extends PreTrainedModel {
}
/**
* The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.
*/
export class MPNetModel extends MPNetPreTrainedModel {
}
/**
 * MPNetForMaskedLM is a class representing an MPNet model for masked language modeling.
*/
export class MPNetForMaskedLM extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
 * MPNetForSequenceClassification is a class representing an MPNet model for sequence classification.
*/
export class MPNetForSequenceClassification extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
 * MPNetForTokenClassification is a class representing an MPNet model for token classification.
*/
export class MPNetForTokenClassification extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
 * MPNetForQuestionAnswering is a class representing an MPNet model for question answering.
*/
export class MPNetForQuestionAnswering extends MPNetPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class SqueezeBertPreTrainedModel extends PreTrainedModel {
}
export class SqueezeBertModel extends SqueezeBertPreTrainedModel {
}
export class SqueezeBertForMaskedLM extends SqueezeBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
export class SqueezeBertForSequenceClassification extends SqueezeBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
export class SqueezeBertForQuestionAnswering extends SqueezeBertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class AlbertPreTrainedModel extends PreTrainedModel {
}
export class AlbertModel extends AlbertPreTrainedModel {
}
export class AlbertForSequenceClassification extends AlbertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
export class AlbertForQuestionAnswering extends AlbertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class AlbertForMaskedLM extends AlbertPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
export class T5PreTrainedModel extends PreTrainedModel {
}
export class T5Model extends T5PreTrainedModel {
}
/**
 * T5ForConditionalGeneration is a class representing a T5 model for conditional generation.
*/
export class T5ForConditionalGeneration extends T5PreTrainedModel {
}
/**
* An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
*/
export class LongT5PreTrainedModel extends PreTrainedModel {
}
/**
* The bare LONGT5 Model transformer outputting raw hidden-states without any specific head on top.
*/
export class LongT5Model extends LongT5PreTrainedModel {
}
/**
* LONGT5 Model with a `language modeling` head on top.
*/
export class LongT5ForConditionalGeneration extends LongT5PreTrainedModel {
}
export class MT5PreTrainedModel extends PreTrainedModel {
}
export class MT5Model extends MT5PreTrainedModel {
}
/**
* A class representing a conditional sequence-to-sequence model based on the MT5 architecture.
*/
export class MT5ForConditionalGeneration extends MT5PreTrainedModel {
}
export class BartPretrainedModel extends PreTrainedModel {
}
/**
* The bare BART Model outputting raw hidden-states without any specific head on top.
*/
export class BartModel extends BartPretrainedModel {
}
/**
* The BART Model with a language modeling head. Can be used for summarization.
*/
export class BartForConditionalGeneration extends BartPretrainedModel {
}
/**
 * BART model with a sequence classification head on top (a linear layer on top of the pooled output)
*/
export class BartForSequenceClassification extends BartPretrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
export class MBartPreTrainedModel extends PreTrainedModel {
}
/**
* The bare MBART Model outputting raw hidden-states without any specific head on top.
*/
export class MBartModel extends MBartPreTrainedModel {
}
/**
* The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.
*/
export class MBartForConditionalGeneration extends MBartPreTrainedModel {
}
/**
 * MBART model with a sequence classification head on top (a linear layer on top of the pooled output).
*/
export class MBartForSequenceClassification extends MBartPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
export class MBartForCausalLM extends MBartPreTrainedModel {
}
export class BlenderbotPreTrainedModel extends PreTrainedModel {
}
/**
* The bare Blenderbot Model outputting raw hidden-states without any specific head on top.
*/
export class BlenderbotModel extends BlenderbotPreTrainedModel {
}
/**
* The Blenderbot Model with a language modeling head. Can be used for summarization.
*/
export class BlenderbotForConditionalGeneration extends BlenderbotPreTrainedModel {
}
export class BlenderbotSmallPreTrainedModel extends PreTrainedModel {
}
/**
* The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.
*/
export class BlenderbotSmallModel extends BlenderbotSmallPreTrainedModel {
}
/**
* The BlenderbotSmall Model with a language modeling head. Can be used for summarization.
*/
export class BlenderbotSmallForConditionalGeneration extends BlenderbotSmallPreTrainedModel {
}
export class RobertaPreTrainedModel extends PreTrainedModel {
}
export class RobertaModel extends RobertaPreTrainedModel {
}
/**
* RobertaForMaskedLM class for performing masked language modeling on Roberta models.
*/
export class RobertaForMaskedLM extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* RobertaForSequenceClassification class for performing sequence classification on Roberta models.
*/
export class RobertaForSequenceClassification extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* RobertaForTokenClassification class for performing token classification on Roberta models.
*/
export class RobertaForTokenClassification extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* RobertaForQuestionAnswering class for performing question answering on Roberta models.
*/
export class RobertaForQuestionAnswering extends RobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
/**
* An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models.
*/
export class XLMPreTrainedModel extends PreTrainedModel {
}
/**
* The bare XLM Model transformer outputting raw hidden-states without any specific head on top.
*/
export class XLMModel extends XLMPreTrainedModel {
}
/**
* The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings).
*/
export class XLMWithLMHeadModel extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output)
*/
export class XLMForSequenceClassification extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* XLM Model with a token classification head on top (a linear layer on top of the hidden-states output)
*/
export class XLMForTokenClassification extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* XLM Model with a span classification head on top for extractive question-answering tasks
*/
export class XLMForQuestionAnswering extends XLMPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class XLMRobertaPreTrainedModel extends PreTrainedModel {
}
export class XLMRobertaModel extends XLMRobertaPreTrainedModel {
}
/**
* XLMRobertaForMaskedLM class for performing masked language modeling on XLMRoberta models.
*/
export class XLMRobertaForMaskedLM extends XLMRobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<MaskedLMOutput>} An object containing the model's output logits for masked language modeling.
*/
_call(model_inputs: any): Promise<MaskedLMOutput>;
}
/**
* XLMRobertaForSequenceClassification class for performing sequence classification on XLMRoberta models.
*/
export class XLMRobertaForSequenceClassification extends XLMRobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<SequenceClassifierOutput>} An object containing the model's output logits for sequence classification.
*/
_call(model_inputs: any): Promise<SequenceClassifierOutput>;
}
/**
* XLMRobertaForTokenClassification class for performing token classification on XLMRoberta models.
*/
export class XLMRobertaForTokenClassification extends XLMRobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
* @returns {Promise<TokenClassifierOutput>} An object containing the model's output logits for token classification.
*/
_call(model_inputs: any): Promise<TokenClassifierOutput>;
}
/**
* XLMRobertaForQuestionAnswering class for performing question answering on XLMRoberta models.
*/
export class XLMRobertaForQuestionAnswering extends XLMRobertaPreTrainedModel {
/**
* Calls the model on new inputs.
*
* @param {Object} model_inputs The inputs to the model.
     * @returns {Promise<QuestionAnsweringModelOutput>} An object containing the model's output logits for question answering.
*/
_call(model_inputs: any): Promise<QuestionAnsweringModelOutput>;
}
export class ASTPreTrainedModel extends PreTrainedModel {
}
/**
* The bare AST Model transformer outputting raw hidden-states without any specific head on top.
*/
export class ASTModel extends ASTPreTrainedModel {
}
/**
* Audio Spectrogram Transformer model with an audio classification head on top
* (a linear layer on top of the pooled output) e.g. for datasets like AudioSet, Speech Commands v2.
*/
export class ASTForAudioClassification extends ASTPreTrainedModel {
}
export class WhisperPreTrainedModel extends PreTrainedModel {
requires_attention_mask: boolean;
}
/**
 * WhisperModel class for using Whisper models without a language modeling head.
*/
export class WhisperModel extends WhisperPreTrainedModel {
}
/**
* WhisperForConditionalGeneration class for generating conditional outputs from Whisper models.
*/
export class WhisperForConditionalGeneration extends WhisperPreTrainedModel {
_prepare_generation_config(generation_config: any, kwargs: any): WhisperGenerationConfig;
/**
*
* @param {WhisperGenerationConfig} generation_config
*/
_retrieve_init_tokens(generation_config: WhisperGenerationConfig): number[];
/**
* Calculates token-level timestamps using the encoder-decoder cross-attentions and
* dynamic time-warping (DTW) to map each output token to a position in the input audio.
* If `num_frames` is specified, the encoder-decoder cross-attentions will be cropped before applying DTW.
* @param {Object} generate_outputs Outputs generated by the model
* @param {Tensor[][]} generate_outputs.cross_attentions The cross attentions output by the model
* @param {Tensor} generate_outputs.sequences The sequences output by the model
* @param {number[][]} alignment_heads Alignment heads of the model
* @param {number} [num_frames=null] Number of frames in the input audio.
* @param {number} [time_precision=0.02] Precision of the timestamps in seconds
* @returns {Tensor} tensor containing the timestamps in seconds for each predicted token
*/
_extract_token_timestamps(generate_outputs: {
cross_attentions: Tensor[][];
sequences: Tensor;
}, alignment_heads: number[][], num_frames?: number, time_precision?: number): Tensor;
}
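/*
 * Timestamp sketch (illustrative): `_extract_token_timestamps` runs DTW over the
 * cross-attentions returned by generation, so the model must be asked to output
 * them. `input_features`, `generation_config`, and `num_frames` are assumed to come
 * from the surrounding application; the audio front-end is not declared in this file.
 *
 *   const out = await model.generate({
 *       inputs: input_features,            // log-mel spectrogram Tensor
 *       return_dict_in_generate: true,
 *       output_attentions: true,
 *   });
 *   const timestamps = model._extract_token_timestamps(
 *       out, generation_config.alignment_heads, num_frames);
 */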
/**
* Vision Encoder-Decoder model based on OpenAI's GPT architecture for image captioning and other vision tasks
*/
export class VisionEncoderDecoderModel extends PreTrainedModel {
}
export class LlavaPreTrainedModel extends PreTrainedModel {
}
/**
* The LLAVA model which consists of a vision backbone and a language model.
*/
export class LlavaForConditionalGeneration extends LlavaPreTrainedModel {
_merge_input_ids_with_image_features({ inputs_embeds, image_features, input_ids, attention_mask, }: {
inputs_embeds: any;
image_features: any;
input_ids: any;
attention_mask: any;
}): {
inputs_embeds: any;
attention_mask: any;
};
}
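/*
 * Multimodal merge sketch (internal API, illustrative): image features from the
 * vision backbone are spliced into the text embedding sequence at the image-token
 * positions, and the attention mask is extended to match.
 *
 *   const image_features = await model.encode_image({ pixel_values });
 *   const inputs_embeds = await model.encode_text({ input_ids });
 *   const merged = model._merge_input_ids_with_image_features({
 *       inputs_embeds, image_features, input_ids, attention_mask,
 *   });
 */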
export class LlavaOnevisionForConditionalGeneration extends LlavaForConditionalGeneration {
}
export class Moondream1ForConditionalGeneration extends LlavaForConditionalGeneration {
}
export class Florence2PreTrainedModel extends PreTrainedModel {
}
export class Florence2ForConditionalGeneration extends Florence2PreTrainedModel {
_merge_input_ids_with_image_features({ inputs_embeds, image_features, input_ids, attention_mask, }: {
inputs_embeds: any;
image_features: any;
input_ids: any;
attention_mask: any;
}): {
inputs_embeds: Tensor;
attention_mask: Tensor;
};
_prepare_inputs_embeds({ input_ids, pixel_values, inputs_embeds, atte