@burncloud/inference
TypeScript client for the Hugging Face Inference Providers and Inference Endpoints
// Generated file - do not edit directly
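// Code-snippet templates for the Hugging Face Inference Providers, keyed by
// language ("js", "python", "sh"), then by client library or tool ("fetch",
// "huggingface.js", "openai", "fal_client", "huggingface_hub", "requests",
// "curl"), then by snippet name. Template bodies use Jinja-style syntax:
// {{ ... }} placeholders are substituted at render time, and {% if %} /
// {% endif %} blocks gate optional fragments such as the X-HF-Bill-To header.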
export const templates: Record<string, Record<string, Record<string, string>>> = {
"js": {
"fetch": {
"basic": "async function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"application/json\",\r\n{% if billTo %}\r\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\r\n{% endif %}\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n\tconst result = await response.json();\r\n\treturn result;\r\n}\r\n\r\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\r\n console.log(JSON.stringify(response));\r\n});",
"basicAudio": "async function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"audio/flac\",\r\n{% if billTo %}\r\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\r\n{% endif %}\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n\tconst result = await response.json();\r\n\treturn result;\r\n}\r\n\r\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\r\n console.log(JSON.stringify(response));\r\n});",
"basicImage": "async function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"image/jpeg\",\r\n{% if billTo %}\r\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\r\n{% endif %}\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n\tconst result = await response.json();\r\n\treturn result;\r\n}\r\n\r\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\r\n console.log(JSON.stringify(response));\r\n});",
"textToAudio": "{% if model.library_name == \"transformers\" %}\r\nasync function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"application/json\",\r\n{% if billTo %}\r\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\r\n{% endif %}\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n\tconst result = await response.blob();\r\n return result;\r\n}\r\n\r\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\r\n // Returns a byte object of the Audio wavform. Use it directly!\r\n});\r\n{% else %}\r\nasync function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"application/json\",\r\n\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n const result = await response.json();\r\n return result;\r\n}\r\n\r\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\r\n console.log(JSON.stringify(response));\r\n});\r\n{% endif %} ",
"textToImage": "async function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"application/json\",\r\n{% if billTo %}\r\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\r\n{% endif %}\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n\tconst result = await response.blob();\r\n\treturn result;\r\n}\r\n\r\n\r\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\r\n // Use image\r\n});",
"textToSpeech": "{% if model.library_name == \"transformers\" %}\r\nasync function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"application/json\",\r\n{% if billTo %}\r\n\t\t\t\t\"X-HF-Bill-To\": \"{{ billTo }}\",\r\n{% endif %}\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n\tconst result = await response.blob();\r\n return result;\r\n}\r\n\r\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\r\n // Returns a byte object of the Audio wavform. Use it directly!\r\n});\r\n{% else %}\r\nasync function query(data) {\r\n\tconst response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n\t\t{\r\n\t\t\theaders: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n\t\t\t\t\"Content-Type\": \"application/json\",\r\n\t\t\t},\r\n\t\t\tmethod: \"POST\",\r\n\t\t\tbody: JSON.stringify(data),\r\n\t\t}\r\n\t);\r\n const result = await response.json();\r\n return result;\r\n}\r\n\r\nquery({ text: {{ inputs.asObj.inputs }} }).then((response) => {\r\n console.log(JSON.stringify(response));\r\n});\r\n{% endif %} ",
"zeroShotClassification": "async function query(data) {\r\n const response = await fetch(\r\n\t\t\"{{ fullUrl }}\",\r\n {\r\n headers: {\r\n\t\t\t\tAuthorization: \"{{ authorizationHeader }}\",\r\n \"Content-Type\": \"application/json\",\r\n{% if billTo %}\r\n \"X-HF-Bill-To\": \"{{ billTo }}\",\r\n{% endif %} },\r\n method: \"POST\",\r\n body: JSON.stringify(data),\r\n }\r\n );\r\n const result = await response.json();\r\n return result;\r\n}\r\n\r\nquery({\r\n inputs: {{ providerInputs.asObj.inputs }},\r\n parameters: { candidate_labels: [\"refund\", \"legal\", \"faq\"] }\r\n}).then((response) => {\r\n console.log(JSON.stringify(response));\r\n});"
},
"huggingface.js": {
"basic": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nconst output = await client.{{ methodName }}({\r\n\tmodel: \"{{ model.id }}\",\r\n\tinputs: {{ inputs.asObj.inputs }},\r\n\tprovider: \"{{ provider }}\",\r\n}{% if billTo %}, {\r\n\tbillTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n\r\nconsole.log(output);",
"basicAudio": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\r\n\r\nconst output = await client.{{ methodName }}({\r\n\tdata,\r\n\tmodel: \"{{ model.id }}\",\r\n\tprovider: \"{{ provider }}\",\r\n}{% if billTo %}, {\r\n\tbillTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n\r\nconsole.log(output);",
"basicImage": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nconst data = fs.readFileSync({{inputs.asObj.inputs}});\r\n\r\nconst output = await client.{{ methodName }}({\r\n\tdata,\r\n\tmodel: \"{{ model.id }}\",\r\n\tprovider: \"{{ provider }}\",\r\n}{% if billTo %}, {\r\n\tbillTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n\r\nconsole.log(output);",
"conversational": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nconst chatCompletion = await client.chatCompletion({\r\n provider: \"{{ provider }}\",\r\n model: \"{{ model.id }}\",\r\n{{ inputs.asTsString }}\r\n}{% if billTo %}, {\r\n billTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n\r\nconsole.log(chatCompletion.choices[0].message);",
"conversationalStream": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nlet out = \"\";\r\n\r\nconst stream = client.chatCompletionStream({\r\n provider: \"{{ provider }}\",\r\n model: \"{{ model.id }}\",\r\n{{ inputs.asTsString }}\r\n}{% if billTo %}, {\r\n billTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n\r\nfor await (const chunk of stream) {\r\n\tif (chunk.choices && chunk.choices.length > 0) {\r\n\t\tconst newContent = chunk.choices[0].delta.content;\r\n\t\tout += newContent;\r\n\t\tconsole.log(newContent);\r\n\t}\r\n}",
"textToImage": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nconst image = await client.textToImage({\r\n provider: \"{{ provider }}\",\r\n model: \"{{ model.id }}\",\r\n\tinputs: {{ inputs.asObj.inputs }},\r\n\tparameters: { num_inference_steps: 5 },\r\n}{% if billTo %}, {\r\n billTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n/// Use the generated image (it's a Blob)",
"textToSpeech": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nconst audio = await client.textToSpeech({\r\n provider: \"{{ provider }}\",\r\n model: \"{{ model.id }}\",\r\n\tinputs: {{ inputs.asObj.inputs }},\r\n}{% if billTo %}, {\r\n billTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n// Use the generated audio (it's a Blob)",
"textToVideo": "import { InferenceClient } from \"@huggingface/inference\";\r\n\r\nconst client = new InferenceClient(\"{{ accessToken }}\");\r\n\r\nconst video = await client.textToVideo({\r\n provider: \"{{ provider }}\",\r\n model: \"{{ model.id }}\",\r\n\tinputs: {{ inputs.asObj.inputs }},\r\n}{% if billTo %}, {\r\n billTo: \"{{ billTo }}\",\r\n}{% endif %});\r\n// Use the generated video (it's a Blob)"
},
"openai": {
"conversational": "import { OpenAI } from \"openai\";\r\n\r\nconst client = new OpenAI({\r\n\tbaseURL: \"{{ baseUrl }}\",\r\n\tapiKey: \"{{ accessToken }}\",\r\n{% if billTo %}\r\n\tdefaultHeaders: {\r\n\t\t\"X-HF-Bill-To\": \"{{ billTo }}\" \r\n\t}\r\n{% endif %}\r\n});\r\n\r\nconst chatCompletion = await client.chat.completions.create({\r\n\tmodel: \"{{ providerModelId }}\",\r\n{{ inputs.asTsString }}\r\n});\r\n\r\nconsole.log(chatCompletion.choices[0].message);",
"conversationalStream": "import { OpenAI } from \"openai\";\r\n\r\nconst client = new OpenAI({\r\n\tbaseURL: \"{{ baseUrl }}\",\r\n\tapiKey: \"{{ accessToken }}\",\r\n{% if billTo %}\r\n defaultHeaders: {\r\n\t\t\"X-HF-Bill-To\": \"{{ billTo }}\" \r\n\t}\r\n{% endif %}\r\n});\r\n\r\nconst stream = await client.chat.completions.create({\r\n model: \"{{ providerModelId }}\",\r\n{{ inputs.asTsString }}\r\n stream: true,\r\n});\r\n\r\nfor await (const chunk of stream) {\r\n process.stdout.write(chunk.choices[0]?.delta?.content || \"\");\r\n}"
}
},
"python": {
"fal_client": {
"textToImage": "{% if provider == \"fal-ai\" %}\r\nimport fal_client\r\n\r\n{% if providerInputs.asObj.loras is defined and providerInputs.asObj.loras != none %}\r\nresult = fal_client.subscribe(\r\n \"{{ providerModelId }}\",\r\n arguments={\r\n \"prompt\": {{ inputs.asObj.inputs }},\r\n \"loras\":{{ providerInputs.asObj.loras | tojson }},\r\n },\r\n)\r\n{% else %}\r\nresult = fal_client.subscribe(\r\n \"{{ providerModelId }}\",\r\n arguments={\r\n \"prompt\": {{ inputs.asObj.inputs }},\r\n },\r\n)\r\n{% endif %} \r\nprint(result)\r\n{% endif %} "
},
"huggingface_hub": {
"basic": "result = client.{{ methodName }}(\r\n {{ inputs.asObj.inputs }},\r\n model=\"{{ model.id }}\",\r\n)",
"basicAudio": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
"basicImage": "output = client.{{ methodName }}({{ inputs.asObj.inputs }}, model=\"{{ model.id }}\")",
"conversational": "completion = client.chat.completions.create(\r\n model=\"{{ model.id }}\",\r\n{{ inputs.asPythonString }}\r\n)\r\n\r\nprint(completion.choices[0].message) ",
"conversationalStream": "stream = client.chat.completions.create(\r\n model=\"{{ model.id }}\",\r\n{{ inputs.asPythonString }}\r\n stream=True,\r\n)\r\n\r\nfor chunk in stream:\r\n print(chunk.choices[0].delta.content, end=\"\") ",
"documentQuestionAnswering": "output = client.document_question_answering(\r\n \"{{ inputs.asObj.image }}\",\r\n question=\"{{ inputs.asObj.question }}\",\r\n model=\"{{ model.id }}\",\r\n) ",
"imageToImage": "# output is a PIL.Image object\r\nimage = client.image_to_image(\r\n \"{{ inputs.asObj.inputs }}\",\r\n prompt=\"{{ inputs.asObj.parameters.prompt }}\",\r\n model=\"{{ model.id }}\",\r\n) ",
"importInferenceClient": "from huggingface_hub import InferenceClient\r\n\r\nclient = InferenceClient(\r\n provider=\"{{ provider }}\",\r\n api_key=\"{{ accessToken }}\",\r\n{% if billTo %}\r\n bill_to=\"{{ billTo }}\",\r\n{% endif %}\r\n)",
"questionAnswering": "answer = client.question_answering(\r\n question=\"{{ inputs.asObj.question }}\",\r\n context=\"{{ inputs.asObj.context }}\",\r\n model=\"{{ model.id }}\",\r\n) ",
"tableQuestionAnswering": "answer = client.question_answering(\r\n query=\"{{ inputs.asObj.query }}\",\r\n table={{ inputs.asObj.table }},\r\n model=\"{{ model.id }}\",\r\n) ",
"textToImage": "# output is a PIL.Image object\r\nimage = client.text_to_image(\r\n {{ inputs.asObj.inputs }},\r\n model=\"{{ model.id }}\",\r\n) ",
"textToSpeech": "# audio is returned as bytes\r\naudio = client.text_to_speech(\r\n {{ inputs.asObj.inputs }},\r\n model=\"{{ model.id }}\",\r\n) \r\n",
"textToVideo": "video = client.text_to_video(\r\n {{ inputs.asObj.inputs }},\r\n model=\"{{ model.id }}\",\r\n) "
},
"openai": {
"conversational": "from openai import OpenAI\r\n\r\nclient = OpenAI(\r\n base_url=\"{{ baseUrl }}\",\r\n api_key=\"{{ accessToken }}\",\r\n{% if billTo %}\r\n default_headers={\r\n \"X-HF-Bill-To\": \"{{ billTo }}\"\r\n }\r\n{% endif %}\r\n)\r\n\r\ncompletion = client.chat.completions.create(\r\n model=\"{{ providerModelId }}\",\r\n{{ inputs.asPythonString }}\r\n)\r\n\r\nprint(completion.choices[0].message) ",
"conversationalStream": "from openai import OpenAI\r\n\r\nclient = OpenAI(\r\n base_url=\"{{ baseUrl }}\",\r\n api_key=\"{{ accessToken }}\",\r\n{% if billTo %}\r\n default_headers={\r\n \"X-HF-Bill-To\": \"{{ billTo }}\"\r\n }\r\n{% endif %}\r\n)\r\n\r\nstream = client.chat.completions.create(\r\n model=\"{{ providerModelId }}\",\r\n{{ inputs.asPythonString }}\r\n stream=True,\r\n)\r\n\r\nfor chunk in stream:\r\n print(chunk.choices[0].delta.content, end=\"\")"
},
"requests": {
"basic": "def query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.json()\r\n\r\noutput = query({\r\n \"inputs\": {{ providerInputs.asObj.inputs }},\r\n}) ",
"basicAudio": "def query(filename):\r\n with open(filename, \"rb\") as f:\r\n data = f.read()\r\n response = requests.post(API_URL, headers={\"Content-Type\": \"audio/flac\", **headers}, data=data)\r\n return response.json()\r\n\r\noutput = query({{ providerInputs.asObj.inputs }})",
"basicImage": "def query(filename):\r\n with open(filename, \"rb\") as f:\r\n data = f.read()\r\n response = requests.post(API_URL, headers={\"Content-Type\": \"image/jpeg\", **headers}, data=data)\r\n return response.json()\r\n\r\noutput = query({{ providerInputs.asObj.inputs }})",
"conversational": "def query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.json()\r\n\r\nresponse = query({\r\n{{ providerInputs.asJsonString }}\r\n})\r\n\r\nprint(response[\"choices\"][0][\"message\"])",
"conversationalStream": "def query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload, stream=True)\r\n for line in response.iter_lines():\r\n if not line.startswith(b\"data:\"):\r\n continue\r\n if line.strip() == b\"data: [DONE]\":\r\n return\r\n yield json.loads(line.decode(\"utf-8\").lstrip(\"data:\").rstrip(\"/n\"))\r\n\r\nchunks = query({\r\n{{ providerInputs.asJsonString }},\r\n \"stream\": True,\r\n})\r\n\r\nfor chunk in chunks:\r\n print(chunk[\"choices\"][0][\"delta\"][\"content\"], end=\"\")",
"documentQuestionAnswering": "def query(payload):\r\n with open(payload[\"image\"], \"rb\") as f:\r\n img = f.read()\r\n payload[\"image\"] = base64.b64encode(img).decode(\"utf-8\")\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.json()\r\n\r\noutput = query({\r\n \"inputs\": {\r\n \"image\": \"{{ inputs.asObj.image }}\",\r\n \"question\": \"{{ inputs.asObj.question }}\",\r\n },\r\n}) ",
"imageToImage": "def query(payload):\r\n with open(payload[\"inputs\"], \"rb\") as f:\r\n img = f.read()\r\n payload[\"inputs\"] = base64.b64encode(img).decode(\"utf-8\")\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.content\r\n\r\nimage_bytes = query({\r\n{{ providerInputs.asJsonString }}\r\n})\r\n\r\n# You can access the image with PIL.Image for example\r\nimport io\r\nfrom PIL import Image\r\nimage = Image.open(io.BytesIO(image_bytes)) ",
"importRequests": "{% if importBase64 %}\r\nimport base64\r\n{% endif %}\r\n{% if importJson %}\r\nimport json\r\n{% endif %}\r\nimport requests\r\n\r\nAPI_URL = \"{{ fullUrl }}\"\r\nheaders = {\r\n \"Authorization\": \"{{ authorizationHeader }}\",\r\n{% if billTo %}\r\n \"X-HF-Bill-To\": \"{{ billTo }}\"\r\n{% endif %}\r\n}",
"tabular": "def query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.content\r\n\r\nresponse = query({\r\n \"inputs\": {\r\n \"data\": {{ providerInputs.asObj.inputs }}\r\n },\r\n}) ",
"textToAudio": "{% if model.library_name == \"transformers\" %}\r\ndef query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.content\r\n\r\naudio_bytes = query({\r\n \"inputs\": {{ inputs.asObj.inputs }},\r\n})\r\n# You can access the audio with IPython.display for example\r\nfrom IPython.display import Audio\r\nAudio(audio_bytes)\r\n{% else %}\r\ndef query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.json()\r\n\r\naudio, sampling_rate = query({\r\n \"inputs\": {{ inputs.asObj.inputs }},\r\n})\r\n# You can access the audio with IPython.display for example\r\nfrom IPython.display import Audio\r\nAudio(audio, rate=sampling_rate)\r\n{% endif %} ",
"textToImage": "{% if provider == \"hf-inference\" %}\r\ndef query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.content\r\n\r\nimage_bytes = query({\r\n \"inputs\": {{ providerInputs.asObj.inputs }},\r\n})\r\n\r\n# You can access the image with PIL.Image for example\r\nimport io\r\nfrom PIL import Image\r\nimage = Image.open(io.BytesIO(image_bytes))\r\n{% endif %}",
"textToSpeech": "{% if model.library_name == \"transformers\" %}\r\ndef query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.content\r\n\r\naudio_bytes = query({\r\n \"text\": {{ inputs.asObj.inputs }},\r\n})\r\n# You can access the audio with IPython.display for example\r\nfrom IPython.display import Audio\r\nAudio(audio_bytes)\r\n{% else %}\r\ndef query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.json()\r\n\r\naudio, sampling_rate = query({\r\n \"text\": {{ inputs.asObj.inputs }},\r\n})\r\n# You can access the audio with IPython.display for example\r\nfrom IPython.display import Audio\r\nAudio(audio, rate=sampling_rate)\r\n{% endif %} ",
"zeroShotClassification": "def query(payload):\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.json()\r\n\r\noutput = query({\r\n \"inputs\": {{ providerInputs.asObj.inputs }},\r\n \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]},\r\n}) ",
"zeroShotImageClassification": "def query(data):\r\n with open(data[\"image_path\"], \"rb\") as f:\r\n img = f.read()\r\n payload={\r\n \"parameters\": data[\"parameters\"],\r\n \"inputs\": base64.b64encode(img).decode(\"utf-8\")\r\n }\r\n response = requests.post(API_URL, headers=headers, json=payload)\r\n return response.json()\r\n\r\noutput = query({\r\n \"image_path\": {{ providerInputs.asObj.inputs }},\r\n \"parameters\": {\"candidate_labels\": [\"cat\", \"dog\", \"llama\"]},\r\n}) "
}
},
"sh": {
"curl": {
"basic": "curl {{ fullUrl }} \\\r\n -X POST \\\r\n -H 'Authorization: {{ authorizationHeader }}' \\\r\n -H 'Content-Type: application/json' \\\r\n{% if billTo %}\r\n -H 'X-HF-Bill-To: {{ billTo }}' \\\r\n{% endif %}\r\n -d '{\r\n{{ providerInputs.asCurlString }}\r\n }'",
"basicAudio": "curl {{ fullUrl }} \\\r\n -X POST \\\r\n -H 'Authorization: {{ authorizationHeader }}' \\\r\n -H 'Content-Type: audio/flac' \\\r\n{% if billTo %}\r\n -H 'X-HF-Bill-To: {{ billTo }}' \\\r\n{% endif %}\r\n --data-binary @{{ providerInputs.asObj.inputs }}",
"basicImage": "curl {{ fullUrl }} \\\r\n -X POST \\\r\n -H 'Authorization: {{ authorizationHeader }}' \\\r\n -H 'Content-Type: image/jpeg' \\\r\n{% if billTo %}\r\n -H 'X-HF-Bill-To: {{ billTo }}' \\\r\n{% endif %}\r\n --data-binary @{{ providerInputs.asObj.inputs }}",
"conversational": "curl {{ fullUrl }} \\\r\n -H 'Authorization: {{ authorizationHeader }}' \\\r\n -H 'Content-Type: application/json' \\\r\n{% if billTo %}\r\n -H 'X-HF-Bill-To: {{ billTo }}' \\\r\n{% endif %}\r\n -d '{\r\n{{ providerInputs.asCurlString }},\r\n \"stream\": false\r\n }'",
"conversationalStream": "curl {{ fullUrl }} \\\r\n -H 'Authorization: {{ authorizationHeader }}' \\\r\n -H 'Content-Type: application/json' \\\r\n{% if billTo %}\r\n -H 'X-HF-Bill-To: {{ billTo }}' \\\r\n{% endif %}\r\n -d '{\r\n{{ providerInputs.asCurlString }},\r\n \"stream\": true\r\n }'",
"zeroShotClassification": "curl {{ fullUrl }} \\\r\n -X POST \\\r\n -d '{\"inputs\": {{ providerInputs.asObj.inputs }}, \"parameters\": {\"candidate_labels\": [\"refund\", \"legal\", \"faq\"]}}' \\\r\n -H 'Content-Type: application/json' \\\r\n -H 'Authorization: {{ authorizationHeader }}'\r\n{% if billTo %} \\\r\n -H 'X-HF-Bill-To: {{ billTo }}'\r\n{% endif %}"
}
}
} as const;
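
// --- Illustrative sketch (not part of the generated file) ---
// A minimal example of how a consumer might look up and fill one of these
// templates. It performs {{ ... }} substitution only; the {% if %} / {% endif %}
// blocks require a Jinja-compatible engine (e.g. nunjucks). The function name
// and the example values below are assumptions, not part of this package's API.
function renderSnippet(template: string, values: Record<string, string>): string {
	// Replace each "{{ key }}" with its value; leave unknown placeholders intact.
	return template.replace(/\{\{\s*([\w.]+)\s*\}\}/g, (match, key) =>
		key in values ? values[key] : match
	);
}

// Example: render the Python/OpenAI conversational snippet (values are made up).
const snippet = renderSnippet(templates["python"]["openai"]["conversational"], {
	baseUrl: "https://router.huggingface.co/v1",
	accessToken: "hf_xxx",
	providerModelId: "meta-llama/Llama-3.1-8B-Instruct",
	"inputs.asPythonString": "    messages=[{\"role\": \"user\", \"content\": \"Hello!\"}],",
});
console.log(snippet);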