// palm-api: A PaLM API for JS & TS
var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) {
if (kind === "m") throw new TypeError("Private method is not writable");
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter");
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it");
return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;
};
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
};
var _PaLM_fetch;
/**
* PaLM API.
*
* @class PaLM
* @typedef {PaLM}
*/
class PaLM {
/**
* @constructor
* @public
* @param {string} key
* @param {{ fetch?: typeof fetch }} [rawConfig={}]
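     * @example
     * // Usage sketch; the key is a placeholder. Pass a fetch polyfill
     * // (e.g. node-fetch) only when the environment lacks a global fetch.
     * const palm = new PaLM("YOUR_API_KEY");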
*/
constructor(key, rawConfig = {}) {
/**
* Fetch for requests.
*
* @type {typeof fetch}
*/
_PaLM_fetch.set(this, void 0);
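        // Probe for a global fetch; environments without one throw a
        // ReferenceError here, leaving defaultFetch undefined.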
let defaultFetch;
try {
defaultFetch = fetch;
}
catch { }
const config = this.parseConfig({ fetch: defaultFetch }, rawConfig);
if (!config.fetch) {
throw new Error("Fetch was not found in environment, and no polyfill was provided. Please install a polyfill, and put it in the `fetch` property of the PaLM configuration.");
}
__classPrivateFieldSet(this, _PaLM_fetch, config.fetch, "f");
this.key = key;
}
/**
* Parses a configuration and merges it with the defaults.
*
* @internal
* @private
* @template {{}} T
* @param {T} defaults
* @param {Partial<T>} [raw={}]
* @returns {T}
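     * @example
     * // Illustration: raw values override defaults; unknown keys throw.
     * // parseConfig({ temperature: 0 }, { temperature: 0.7 }) -> { temperature: 0.7 }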
*/
parseConfig(defaults, raw = {}) {
const extras = Object.keys(raw).filter((item) => !Object.keys(defaults).includes(item));
if (extras.length) {
            throw new Error(`The following configuration options are not available on this function: ${extras.join(", ")}`);
}
return { ...defaults, ...raw };
}
/**
* Executes a query to the Google PaLM API.
*
* @internal
* @private
* @async
* @template {keyof QueryType} K
* @param {string} model
* @param {K} command
* @param {object} body
* @returns {Promise<QueryType[K]>}
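     * @example
     * // Illustration of the request this builds (key is a placeholder):
     * // POST https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=YOUR_API_KEY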
*/
async query(model, command, body) {
const opts = {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(body),
};
const response = await __classPrivateFieldGet(this, _PaLM_fetch, "f").call(this, `https://generativelanguage.googleapis.com/v1beta2/models/${model}:${command}?key=${this.key}`, opts);
const json = await response.json();
if (!response.ok) {
throw new Error(json.error.message);
}
return json;
}
/**
* Uses the `generateText` capable models to let PaLM generate text.
*
* @public
* @async
* @template {Format} [F='markdown'] response format.
* @param {string} message
* @param {Partial<GenerateTextConfig<F>>} [rawConfig={}]
* @returns {Promise<GeneratedTextResponseFormat[F]>}
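     * @example
     * // Usage sketch: resolves to a markdown string by default,
     * // or to the raw API response with { format: PaLM.FORMATS.JSON }.
     * const text = await palm.generateText("Write a haiku about the sea.", {
     *     temperature: 0.5,
     * });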
*/
async generateText(message, rawConfig = {}) {
const config = this.parseConfig({
candidate_count: 1,
temperature: 0,
top_p: 0.95,
top_k: 40,
model: "text-bison-001",
format: PaLM.FORMATS.MD,
}, rawConfig);
const response = await this.query(config.model, "generateText", {
prompt: { text: message },
candidate_count: config.candidate_count,
temperature: config.temperature,
top_p: config.top_p,
top_k: config.top_k,
});
switch (config.format) {
case PaLM.FORMATS.MD:
                return response.candidates[0].output;
case PaLM.FORMATS.JSON:
return response;
default:
throw new Error(`${config.format} is not a valid format. Use PaLM.FORMATS.MD or PaLM.FORMATS.JSON.`);
}
}
/**
* Uses the `generateMessage` capable models to provide a high-quality LLM experience, with context, examples, and more.
*
* @public
* @async
* @template {Format} [F='markdown'] response format.
* @param {string} message
* @param {Partial<AskConfig<F>>} [rawConfig={}]
* @returns {Promise<ResponseByFormat<AskResponse>[F]>}
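     * @example
     * // Usage sketch: context steers the model; examples are [input, output] pairs.
     * const reply = await palm.ask("What is a closure?", {
     *     context: "Answer like a JavaScript tutor.",
     *     examples: [["What is hoisting?", "Declarations are moved to the top of their scope."]],
     * });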
*/
async ask(message, rawConfig = {}) {
const config = this.parseConfig({
candidate_count: 1,
temperature: 0.7,
top_p: 0.95,
top_k: 40,
model: "chat-bison-001",
format: PaLM.FORMATS.MD,
context: "",
examples: [],
}, rawConfig);
const response = await this.query(config.model, "generateMessage", {
prompt: {
context: config.context,
messages: [{ content: message }],
examples: config.examples.map((example) => ({
input: { content: example[0] },
output: { content: example[1] },
})),
},
candidate_count: config.candidate_count,
temperature: config.temperature,
top_p: config.top_p,
top_k: config.top_k,
});
        if (response.filters) {
            throw new Error(`Request rejected. Got ${JSON.stringify(response.filters)} instead of a response.`);
        }
        switch (config.format) {
            case PaLM.FORMATS.MD:
                return response.candidates[0].content;
            case PaLM.FORMATS.JSON:
                return response;
            default:
                throw new Error(`${config.format} is not a valid format. Use PaLM.FORMATS.MD or PaLM.FORMATS.JSON.`);
        }
}
/**
     * Uses PaLM to embed your text into a float vector with `embedText` capable models, which you can use for various downstream tasks.
*
* @public
* @async
* @param {string} message
* @param {Partial<EmbedTextConfig>} [rawConfig={}]
* @returns {Promise<number[]>}
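     * @example
     * // Resolves to a plain array of floats, usable e.g. for cosine similarity.
     * const vector = await palm.embed("Hello, world!");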
*/
async embed(message, rawConfig = {}) {
const config = this.parseConfig({ model: "embedding-gecko-001" }, rawConfig);
const response = await this.query(config.model, "embedText", {
text: message,
});
return response.embedding.value;
}
/**
* Uses `generateMessage` capable models to create a chat interface that's simple, fast, and easy to use.
*
* @public
* @param {Partial<CreateChatConfig>} [rawConfig={}]
* @returns {Chat}
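     * @example
     * // Usage sketch: each chat keeps its own message history.
     * const chat = palm.createChat({ context: "Speak like a pirate." });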
*/
createChat(rawConfig = {}) {
return new Chat(this, rawConfig);
}
}
_PaLM_fetch = new WeakMap();
/**
* Available response formats.
*
* @public
* @static
* @type {{ readonly JSON: "json"; readonly MD: "markdown"; }}
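 * @example
 * // Request the raw API response instead of plain markdown text:
 * // palm.generateText("Hi!", { format: PaLM.FORMATS.JSON })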
*/
PaLM.FORMATS = {
JSON: "json",
MD: "markdown",
};
/**
* Chat wrapper interface.
*
* @export
* @class Chat
* @typedef {Chat}
*/
class Chat {
/**
* @constructor
* @param {PaLM} PaLM
* @param {Partial<CreateChatConfig>} [rawConfig={}]
*/
constructor(PaLM, rawConfig = {}) {
this.PaLM = PaLM;
this.config = this.PaLM["parseConfig"]({
context: "",
messages: [],
examples: [],
temperature: 0.5,
candidate_count: 1,
top_p: 0.95,
top_k: 40,
model: "chat-bison-001",
max_output_tokens: 1024,
}, rawConfig);
this.messages = this.config.messages;
}
/**
* Same as {@link PaLM.ask()} but remembers previous messages and responses, to enable continued conversations.
*
* @async
* @template {Format} [F='markdown'] response format.
* @param {string} message
* @param {Partial<ChatAskConfig<F>>} [rawConfig={}]
* @returns {Promise<ResponseByFormat<AskResponse>[F]>}
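     * @example
     * // Multi-turn sketch: earlier exchanges are replayed with each request.
     * await chat.ask("My name is Ada.");
     * const reply = await chat.ask("What is my name?");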
*/
async ask(message, rawConfig = {}) {
const config = {
...this.config,
...this.PaLM["parseConfig"]({ format: PaLM.FORMATS.MD }, rawConfig),
};
const response = await this.PaLM["query"](config.model, "generateMessage", {
prompt: {
context: config.context,
messages: [...this.messages, { content: message }],
examples: config.examples.map((example) => ({
input: { content: example[0] },
output: { content: example[1] },
})),
},
candidate_count: config.candidate_count,
temperature: config.temperature,
top_p: config.top_p,
top_k: config.top_k,
});
        if (response.filters) {
            throw new Error(`Request rejected. Got ${JSON.stringify(response.filters)} instead of a response.`);
        }
this.messages.push({ content: message });
this.messages.push({ content: response.candidates[0].content });
switch (config.format) {
case PaLM.FORMATS.MD:
                return response.candidates[0].content;
case PaLM.FORMATS.JSON:
return response;
default:
throw new Error(`${config.format} is not a valid format. Use PaLM.FORMATS.MD or PaLM.FORMATS.JSON.`);
}
}
/**
     * Exports the message history.
*
* @returns {Message[]}
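     * @example
     * // Persist a conversation and resume it later via the `messages` config.
     * const history = chat.export();
     * const resumed = palm.createChat({ messages: history });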
*/
export() {
return this.messages;
}
}
export default PaLM;
//# sourceMappingURL=index.js.map